diff --git "a/data/config.json" "b/data/config.json" new file mode 100644--- /dev/null +++ "b/data/config.json" @@ -0,0 +1,57630 @@ +[ + { + "prompt_type": "plain", + "instruction": ": What does max_runtime_minutes do? : max runtime minutes config.toml: If the experiment is not done after this many minutes, stop feature engineering and model tuning as soon as possible and proceed with building the final modeling pipeline and deployment artifacts, independent of model score convergence or pre-determined number of iterations. Only active is not in reproducible mode. Depending on the data and experiment settings, overall experiment runtime can differ significantly from this setting." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_runtime_minutes. : max runtime minutes config.toml: If the experiment is not done after this many minutes, stop feature engineering and model tuning as soon as possible and proceed with building the final modeling pipeline and deployment artifacts, independent of model score convergence or pre-determined number of iterations. Only active is not in reproducible mode. Depending on the data and experiment settings, overall experiment runtime can differ significantly from this setting." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. runtime in minutes before triggering the 'Finish' button. Approximately enforced. (0 = disabled): . : Set the max runtime minutes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_runtime_minutes", + "output": "max runtime minutes config.toml: If the experiment is not done after this many minutes, stop feature engineering and model tuning as soon as possible and proceed with building the final modeling pipeline and deployment artifacts, independent of model score convergence or pre-determined number of iterations. Only active is not in reproducible mode. 
Depending on the data and experiment settings, overall experiment runtime can differ significantly from this setting." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_runtime_minutes", + "output": "max runtime minutes config.toml: Max. runtime in minutes before triggering the 'Finish' button. Approximately enforced. (0 = disabled): If the experiment is not done after this many minutes, stop feature engineering and model tuning as soon as possible and proceed with building the final modeling pipeline and deployment artifacts, independent of model score convergence or pre-determined number of iterations. Only active is not in reproducible mode. Depending on the data and experiment settings, overall experiment runtime can differ significantly from this setting." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max runtime minutes", + "output": "max runtime minutes config.toml: Max. runtime in minutes before triggering the 'Finish' button. Approximately enforced. (0 = disabled): If the experiment is not done after this many minutes, stop feature engineering and model tuning as soon as possible and proceed with building the final modeling pipeline and deployment artifacts, independent of model score convergence or pre-determined number of iterations. Only active is not in reproducible mode. Depending on the data and experiment settings, overall experiment runtime can differ significantly from this setting." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. runtime in minutes before triggering the 'Finish' button. Approximately enforced. (0 = disabled): ", + "output": "max runtime minutes config.toml: Max. runtime in minutes before triggering the 'Finish' button. Approximately enforced. 
(0 = disabled): If the experiment is not done after this many minutes, stop feature engineering and model tuning as soon as possible and proceed with building the final modeling pipeline and deployment artifacts, independent of model score convergence or pre-determined number of iterations. Only active is not in reproducible mode. Depending on the data and experiment settings, overall experiment runtime can differ significantly from this setting." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_runtime_minutes", + "output": "max runtime minutes config.toml: If the experiment is not done after this many minutes, stop feature engineering and model tuning as soon as possible and proceed with building the final modeling pipeline and deployment artifacts, independent of model score convergence or pre-determined number of iterations. Only active is not in reproducible mode. Depending on the data and experiment settings, overall experiment runtime can differ significantly from this setting." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_runtime_minutes", + "output": "max runtime minutes config.toml: Max. runtime in minutes before triggering the 'Finish' button. Approximately enforced. (0 = disabled): If the experiment is not done after this many minutes, stop feature engineering and model tuning as soon as possible and proceed with building the final modeling pipeline and deployment artifacts, independent of model score convergence or pre-determined number of iterations. Only active is not in reproducible mode. Depending on the data and experiment settings, overall experiment runtime can differ significantly from this setting." + }, + { + "prompt_type": "plain", + "instruction": ": What does min_auto_runtime_minutes do? 
: min auto runtime minutes config.toml: if non-zero, then set max_runtime_minutes automatically to min(max_runtime_minutes, max(min_auto_runtime_minutes, runtime estimate)) when enable_preview_time_estimate is true, so that the preview performs a best estimate of the runtime. Set to zero to disable runtime estimate being used to constrain runtime of experiment." + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_auto_runtime_minutes. : min auto runtime minutes config.toml: if non-zero, then set max_runtime_minutes automatically to min(max_runtime_minutes, max(min_auto_runtime_minutes, runtime estimate)) when enable_preview_time_estimate is true, so that the preview performs a best estimate of the runtime. Set to zero to disable runtime estimate being used to constrain runtime of experiment." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Min. runtime in minutes for automatic runtime control (0 = disabled): . : Set the min auto runtime minutes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_auto_runtime_minutes", + "output": "min auto runtime minutes config.toml: if non-zero, then set max_runtime_minutes automatically to min(max_runtime_minutes, max(min_auto_runtime_minutes, runtime estimate)) when enable_preview_time_estimate is true, so that the preview performs a best estimate of the runtime. Set to zero to disable runtime estimate being used to constrain runtime of experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_auto_runtime_minutes", + "output": "min auto runtime minutes config.toml: Min. 
runtime in minutes for automatic runtime control (0 = disabled): if non-zero, then set max_runtime_minutes automatically to min(max_runtime_minutes, max(min_auto_runtime_minutes, runtime estimate)) when enable_preview_time_estimate is true, so that the preview performs a best estimate of the runtime. Set to zero to disable runtime estimate being used to constrain runtime of experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min auto runtime minutes", + "output": "min auto runtime minutes config.toml: Min. runtime in minutes for automatic runtime control (0 = disabled): if non-zero, then set max_runtime_minutes automatically to min(max_runtime_minutes, max(min_auto_runtime_minutes, runtime estimate)) when enable_preview_time_estimate is true, so that the preview performs a best estimate of the runtime. Set to zero to disable runtime estimate being used to constrain runtime of experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Min. runtime in minutes for automatic runtime control (0 = disabled): ", + "output": "min auto runtime minutes config.toml: Min. runtime in minutes for automatic runtime control (0 = disabled): if non-zero, then set max_runtime_minutes automatically to min(max_runtime_minutes, max(min_auto_runtime_minutes, runtime estimate)) when enable_preview_time_estimate is true, so that the preview performs a best estimate of the runtime. Set to zero to disable runtime estimate being used to constrain runtime of experiment." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_auto_runtime_minutes", + "output": "min auto runtime minutes config.toml: if non-zero, then set max_runtime_minutes automatically to min(max_runtime_minutes, max(min_auto_runtime_minutes, runtime estimate)) when enable_preview_time_estimate is true, so that the preview performs a best estimate of the runtime. Set to zero to disable runtime estimate being used to constrain runtime of experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_auto_runtime_minutes", + "output": "min auto runtime minutes config.toml: Min. runtime in minutes for automatic runtime control (0 = disabled): if non-zero, then set max_runtime_minutes automatically to min(max_runtime_minutes, max(min_auto_runtime_minutes, runtime estimate)) when enable_preview_time_estimate is true, so that the preview performs a best estimate of the runtime. Set to zero to disable runtime estimate being used to constrain runtime of experiment." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_runtime_minutes_smart do? : max runtime minutes smart config.toml: Whether to tune max_runtime_minutes based upon final number of base models,so try to trigger start of final model in order to better ensure stop entire experiment before max_runtime_minutes.Note: If the time given is short enough that tuning models are reduced belowfinal model expectations, the final model may be shorter than expected leadingto an overall shorter experiment time." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_runtime_minutes_smart. 
: max runtime minutes smart config.toml: Whether to tune max_runtime_minutes based upon final number of base models,so try to trigger start of final model in order to better ensure stop entire experiment before max_runtime_minutes.Note: If the time given is short enough that tuning models are reduced belowfinal model expectations, the final model may be shorter than expected leadingto an overall shorter experiment time." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Smart runtime mode: . : Set the max runtime minutes smart config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_runtime_minutes_smart", + "output": "max runtime minutes smart config.toml: Whether to tune max_runtime_minutes based upon final number of base models,so try to trigger start of final model in order to better ensure stop entire experiment before max_runtime_minutes.Note: If the time given is short enough that tuning models are reduced belowfinal model expectations, the final model may be shorter than expected leadingto an overall shorter experiment time." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_runtime_minutes_smart", + "output": "max runtime minutes smart config.toml: Smart runtime mode: Whether to tune max_runtime_minutes based upon final number of base models,so try to trigger start of final model in order to better ensure stop entire experiment before max_runtime_minutes.Note: If the time given is short enough that tuning models are reduced belowfinal model expectations, the final model may be shorter than expected leadingto an overall shorter experiment time." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max runtime minutes smart", + "output": "max runtime minutes smart config.toml: Smart runtime mode: Whether to tune max_runtime_minutes based upon final number of base models,so try to trigger start of final model in order to better ensure stop entire experiment before max_runtime_minutes.Note: If the time given is short enough that tuning models are reduced belowfinal model expectations, the final model may be shorter than expected leadingto an overall shorter experiment time." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Smart runtime mode: ", + "output": "max runtime minutes smart config.toml: Smart runtime mode: Whether to tune max_runtime_minutes based upon final number of base models,so try to trigger start of final model in order to better ensure stop entire experiment before max_runtime_minutes.Note: If the time given is short enough that tuning models are reduced belowfinal model expectations, the final model may be shorter than expected leadingto an overall shorter experiment time." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_runtime_minutes_smart", + "output": "max runtime minutes smart config.toml: Whether to tune max_runtime_minutes based upon final number of base models,so try to trigger start of final model in order to better ensure stop entire experiment before max_runtime_minutes.Note: If the time given is short enough that tuning models are reduced belowfinal model expectations, the final model may be shorter than expected leadingto an overall shorter experiment time." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_runtime_minutes_smart", + "output": "max runtime minutes smart config.toml: Smart runtime mode: Whether to tune max_runtime_minutes based upon final number of base models,so try to trigger start of final model in order to better ensure stop entire experiment before max_runtime_minutes.Note: If the time given is short enough that tuning models are reduced belowfinal model expectations, the final model may be shorter than expected leadingto an overall shorter experiment time." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_runtime_minutes_until_abort do? : max runtime minutes until abort config.toml: If the experiment is not done after this many minutes, push the abort button. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_runtime_minutes_until_abort. : max runtime minutes until abort config.toml: If the experiment is not done after this many minutes, push the abort button. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. runtime in minutes before triggering the 'Abort' button.(0 = disabled): . : Set the max runtime minutes until abort config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_runtime_minutes_until_abort", + "output": "max runtime minutes until abort config.toml: If the experiment is not done after this many minutes, push the abort button. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_runtime_minutes_until_abort", + "output": "max runtime minutes until abort config.toml: Max. runtime in minutes before triggering the 'Abort' button.(0 = disabled): If the experiment is not done after this many minutes, push the abort button. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max runtime minutes until abort", + "output": "max runtime minutes until abort config.toml: Max. runtime in minutes before triggering the 'Abort' button.(0 = disabled): If the experiment is not done after this many minutes, push the abort button. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. runtime in minutes before triggering the 'Abort' button.(0 = disabled): ", + "output": "max runtime minutes until abort config.toml: Max. runtime in minutes before triggering the 'Abort' button.(0 = disabled): If the experiment is not done after this many minutes, push the abort button. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_runtime_minutes_until_abort", + "output": "max runtime minutes until abort config.toml: If the experiment is not done after this many minutes, push the abort button. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_runtime_minutes_until_abort", + "output": "max runtime minutes until abort config.toml: Max. runtime in minutes before triggering the 'Abort' button.(0 = disabled): If the experiment is not done after this many minutes, push the abort button. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made." + }, + { + "prompt_type": "plain", + "instruction": ": What does strict_reproducible_for_max_runtime do? : strict reproducible for max runtime config.toml: If reproducbile is set, then experiment and all artifacts are reproducible, however then experiments may take arbitrarily long for a given choice of dials, features, and models. Setting this to False allows the experiment to complete after a fixed time, with all aspects of the model and feature building are reproducible and seeded, but the overall experiment behavior will not necessarily be reproducible if later iterations would have been used in final model building. This should set to True if every seeded experiment of exact same setup needs to generate the exact same final model, regardless of duration." + }, + { + "prompt_type": "plain", + "instruction": ": Explain strict_reproducible_for_max_runtime. : strict reproducible for max runtime config.toml: If reproducbile is set, then experiment and all artifacts are reproducible, however then experiments may take arbitrarily long for a given choice of dials, features, and models. Setting this to False allows the experiment to complete after a fixed time, with all aspects of the model and feature building are reproducible and seeded, but the overall experiment behavior will not necessarily be reproducible if later iterations would have been used in final model building. 
This should set to True if every seeded experiment of exact same setup needs to generate the exact same final model, regardless of duration." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to disable time-based limits when reproducible is set: . : Set the strict reproducible for max runtime config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "strict_reproducible_for_max_runtime", + "output": "strict reproducible for max runtime config.toml: If reproducbile is set, then experiment and all artifacts are reproducible, however then experiments may take arbitrarily long for a given choice of dials, features, and models. Setting this to False allows the experiment to complete after a fixed time, with all aspects of the model and feature building are reproducible and seeded, but the overall experiment behavior will not necessarily be reproducible if later iterations would have been used in final model building. This should set to True if every seeded experiment of exact same setup needs to generate the exact same final model, regardless of duration." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "strict_reproducible_for_max_runtime", + "output": "strict reproducible for max runtime config.toml: Whether to disable time-based limits when reproducible is set: If reproducbile is set, then experiment and all artifacts are reproducible, however then experiments may take arbitrarily long for a given choice of dials, features, and models. Setting this to False allows the experiment to complete after a fixed time, with all aspects of the model and feature building are reproducible and seeded, but the overall experiment behavior will not necessarily be reproducible if later iterations would have been used in final model building. 
This should set to True if every seeded experiment of exact same setup needs to generate the exact same final model, regardless of duration." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "strict reproducible for max runtime", + "output": "strict reproducible for max runtime config.toml: Whether to disable time-based limits when reproducible is set: If reproducbile is set, then experiment and all artifacts are reproducible, however then experiments may take arbitrarily long for a given choice of dials, features, and models. Setting this to False allows the experiment to complete after a fixed time, with all aspects of the model and feature building are reproducible and seeded, but the overall experiment behavior will not necessarily be reproducible if later iterations would have been used in final model building. This should set to True if every seeded experiment of exact same setup needs to generate the exact same final model, regardless of duration." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to disable time-based limits when reproducible is set: ", + "output": "strict reproducible for max runtime config.toml: Whether to disable time-based limits when reproducible is set: If reproducbile is set, then experiment and all artifacts are reproducible, however then experiments may take arbitrarily long for a given choice of dials, features, and models. Setting this to False allows the experiment to complete after a fixed time, with all aspects of the model and feature building are reproducible and seeded, but the overall experiment behavior will not necessarily be reproducible if later iterations would have been used in final model building. This should set to True if every seeded experiment of exact same setup needs to generate the exact same final model, regardless of duration." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting strict_reproducible_for_max_runtime", + "output": "strict reproducible for max runtime config.toml: If reproducbile is set, then experiment and all artifacts are reproducible, however then experiments may take arbitrarily long for a given choice of dials, features, and models. Setting this to False allows the experiment to complete after a fixed time, with all aspects of the model and feature building are reproducible and seeded, but the overall experiment behavior will not necessarily be reproducible if later iterations would have been used in final model building. This should set to True if every seeded experiment of exact same setup needs to generate the exact same final model, regardless of duration." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting strict_reproducible_for_max_runtime", + "output": "strict reproducible for max runtime config.toml: Whether to disable time-based limits when reproducible is set: If reproducbile is set, then experiment and all artifacts are reproducible, however then experiments may take arbitrarily long for a given choice of dials, features, and models. Setting this to False allows the experiment to complete after a fixed time, with all aspects of the model and feature building are reproducible and seeded, but the overall experiment behavior will not necessarily be reproducible if later iterations would have been used in final model building. This should set to True if every seeded experiment of exact same setup needs to generate the exact same final model, regardless of duration." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_preview_time_estimate do? : enable preview time estimate config.toml: Uses model built on large number of experiments to estimate runtime. It can be inaccurate in cases that were not trained on." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_preview_time_estimate. : enable preview time estimate config.toml: Uses model built on large number of experiments to estimate runtime. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to have preview estimate runtime: . : Set the enable preview time estimate config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_preview_time_estimate", + "output": "enable preview time estimate config.toml: Uses model built on large number of experiments to estimate runtime. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_preview_time_estimate", + "output": "enable preview time estimate config.toml: Whether to have preview estimate runtime: Uses model built on large number of experiments to estimate runtime. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable preview time estimate", + "output": "enable preview time estimate config.toml: Whether to have preview estimate runtime: Uses model built on large number of experiments to estimate runtime. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to have preview estimate runtime: ", + "output": "enable preview time estimate config.toml: Whether to have preview estimate runtime: Uses model built on large number of experiments to estimate runtime. It can be inaccurate in cases that were not trained on." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_preview_time_estimate", + "output": "enable preview time estimate config.toml: Uses model built on large number of experiments to estimate runtime. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_preview_time_estimate", + "output": "enable preview time estimate config.toml: Whether to have preview estimate runtime: Uses model built on large number of experiments to estimate runtime. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_preview_mojo_size_estimate do? : enable preview mojo size estimate config.toml: Uses model built on large number of experiments to estimate mojo size. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_preview_mojo_size_estimate. : enable preview mojo size estimate config.toml: Uses model built on large number of experiments to estimate mojo size. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to have preview estimate mojo size: . : Set the enable preview mojo size estimate config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_preview_mojo_size_estimate", + "output": "enable preview mojo size estimate config.toml: Uses model built on large number of experiments to estimate mojo size. It can be inaccurate in cases that were not trained on." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_preview_mojo_size_estimate", + "output": "enable preview mojo size estimate config.toml: Whether to have preview estimate mojo size: Uses model built on large number of experiments to estimate mojo size. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable preview mojo size estimate", + "output": "enable preview mojo size estimate config.toml: Whether to have preview estimate mojo size: Uses model built on large number of experiments to estimate mojo size. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to have preview estimate mojo size: ", + "output": "enable preview mojo size estimate config.toml: Whether to have preview estimate mojo size: Uses model built on large number of experiments to estimate mojo size. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_preview_mojo_size_estimate", + "output": "enable preview mojo size estimate config.toml: Uses model built on large number of experiments to estimate mojo size. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_preview_mojo_size_estimate", + "output": "enable preview mojo size estimate config.toml: Whether to have preview estimate mojo size: Uses model built on large number of experiments to estimate mojo size. It can be inaccurate in cases that were not trained on." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does enable_preview_cpu_memory_estimate do? : enable preview cpu memory estimate config.toml: Uses model built on large number of experiments to estimate max cpu memory. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_preview_cpu_memory_estimate. : enable preview cpu memory estimate config.toml: Uses model built on large number of experiments to estimate max cpu memory. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to have preview estimate max cpu memory: . : Set the enable preview cpu memory estimate config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_preview_cpu_memory_estimate", + "output": "enable preview cpu memory estimate config.toml: Uses model built on large number of experiments to estimate max cpu memory. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_preview_cpu_memory_estimate", + "output": "enable preview cpu memory estimate config.toml: Whether to have preview estimate max cpu memory: Uses model built on large number of experiments to estimate max cpu memory. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable preview cpu memory estimate", + "output": "enable preview cpu memory estimate config.toml: Whether to have preview estimate max cpu memory: Uses model built on large number of experiments to estimate max cpu memory. It can be inaccurate in cases that were not trained on." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to have preview estimate max cpu memory: ", + "output": "enable preview cpu memory estimate config.toml: Whether to have preview estimate max cpu memory: Uses model built on large number of experiments to estimate max cpu memory. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_preview_cpu_memory_estimate", + "output": "enable preview cpu memory estimate config.toml: Uses model built on large number of experiments to estimate max cpu memory. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_preview_cpu_memory_estimate", + "output": "enable preview cpu memory estimate config.toml: Whether to have preview estimate max cpu memory: Uses model built on large number of experiments to estimate max cpu memory. It can be inaccurate in cases that were not trained on." + }, + { + "prompt_type": "plain", + "instruction": ": What does time_abort do? : time abort config.toml: If the experiment is not done by this time, push the abort button. Accepts time in format given by time_abort_format (defaults to %Y-%m-%d %H:%M:%S)assuming a time zone set by time_abort_timezone (defaults to UTC). One can also give integer seconds since 1970-01-01 00:00:00 UTC. Applies to time on a DAI worker that runs experiments. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made.NOTE: If start new experiment with same parameters, restart, or refit, thisabsolute time will apply to such experiments or set of leaderboard experiments." + }, + { + "prompt_type": "plain", + "instruction": ": Explain time_abort. 
: time abort config.toml: If the experiment is not done by this time, push the abort button. Accepts time in format given by time_abort_format (defaults to %Y-%m-%d %H:%M:%S)assuming a time zone set by time_abort_timezone (defaults to UTC). One can also give integer seconds since 1970-01-01 00:00:00 UTC. Applies to time on a DAI worker that runs experiments. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made.NOTE: If start new experiment with same parameters, restart, or refit, thisabsolute time will apply to such experiments or set of leaderboard experiments." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Time to trigger the 'Abort' button.: . : Set the time abort config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_abort", + "output": "time abort config.toml: If the experiment is not done by this time, push the abort button. Accepts time in format given by time_abort_format (defaults to %Y-%m-%d %H:%M:%S)assuming a time zone set by time_abort_timezone (defaults to UTC). One can also give integer seconds since 1970-01-01 00:00:00 UTC. Applies to time on a DAI worker that runs experiments. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made.NOTE: If start new experiment with same parameters, restart, or refit, thisabsolute time will apply to such experiments or set of leaderboard experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_abort", + "output": "time abort config.toml: Time to trigger the 'Abort' button.: If the experiment is not done by this time, push the abort button. Accepts time in format given by time_abort_format (defaults to %Y-%m-%d %H:%M:%S)assuming a time zone set by time_abort_timezone (defaults to UTC). 
One can also give integer seconds since 1970-01-01 00:00:00 UTC. Applies to time on a DAI worker that runs experiments. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made.NOTE: If start new experiment with same parameters, restart, or refit, thisabsolute time will apply to such experiments or set of leaderboard experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time abort", + "output": "time abort config.toml: Time to trigger the 'Abort' button.: If the experiment is not done by this time, push the abort button. Accepts time in format given by time_abort_format (defaults to %Y-%m-%d %H:%M:%S)assuming a time zone set by time_abort_timezone (defaults to UTC). One can also give integer seconds since 1970-01-01 00:00:00 UTC. Applies to time on a DAI worker that runs experiments. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made.NOTE: If start new experiment with same parameters, restart, or refit, thisabsolute time will apply to such experiments or set of leaderboard experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Time to trigger the 'Abort' button.: ", + "output": "time abort config.toml: Time to trigger the 'Abort' button.: If the experiment is not done by this time, push the abort button. Accepts time in format given by time_abort_format (defaults to %Y-%m-%d %H:%M:%S)assuming a time zone set by time_abort_timezone (defaults to UTC). One can also give integer seconds since 1970-01-01 00:00:00 UTC. Applies to time on a DAI worker that runs experiments. 
Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made.NOTE: If start new experiment with same parameters, restart, or refit, thisabsolute time will apply to such experiments or set of leaderboard experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting time_abort", + "output": "time abort config.toml: If the experiment is not done by this time, push the abort button. Accepts time in format given by time_abort_format (defaults to %Y-%m-%d %H:%M:%S)assuming a time zone set by time_abort_timezone (defaults to UTC). One can also give integer seconds since 1970-01-01 00:00:00 UTC. Applies to time on a DAI worker that runs experiments. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made.NOTE: If start new experiment with same parameters, restart, or refit, thisabsolute time will apply to such experiments or set of leaderboard experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting time_abort", + "output": "time abort config.toml: Time to trigger the 'Abort' button.: If the experiment is not done by this time, push the abort button. Accepts time in format given by time_abort_format (defaults to %Y-%m-%d %H:%M:%S)assuming a time zone set by time_abort_timezone (defaults to UTC). One can also give integer seconds since 1970-01-01 00:00:00 UTC. Applies to time on a DAI worker that runs experiments. Preserves experiment artifacts made so far for summary and log zip files, but further artifacts are made.NOTE: If start new experiment with same parameters, restart, or refit, thisabsolute time will apply to such experiments or set of leaderboard experiments." + }, + { + "prompt_type": "plain", + "instruction": ": What does time_abort_format do? : time abort format config.toml: Any format is allowed as accepted by datetime.strptime." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain time_abort_format. : time abort format config.toml: Any format is allowed as accepted by datetime.strptime." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Time string format for time_abort.: . : Set the time abort format config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_abort_format", + "output": "time abort format config.toml: Any format is allowed as accepted by datetime.strptime." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_abort_format", + "output": "time abort format config.toml: Time string format for time_abort.: Any format is allowed as accepted by datetime.strptime." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time abort format", + "output": "time abort format config.toml: Time string format for time_abort.: Any format is allowed as accepted by datetime.strptime." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Time string format for time_abort.: ", + "output": "time abort format config.toml: Time string format for time_abort.: Any format is allowed as accepted by datetime.strptime." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting time_abort_format", + "output": "time abort format config.toml: Any format is allowed as accepted by datetime.strptime." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting time_abort_format", + "output": "time abort format config.toml: Time string format for time_abort.: Any format is allowed as accepted by datetime.strptime." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does time_abort_timezone do? : time abort timezone config.toml: Any time zone in format accepted by datetime.strptime." + }, + { + "prompt_type": "plain", + "instruction": ": Explain time_abort_timezone. : time abort timezone config.toml: Any time zone in format accepted by datetime.strptime." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Time zone for time_abort.: . : Set the time abort timezone config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_abort_timezone", + "output": "time abort timezone config.toml: Any time zone in format accepted by datetime.strptime." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_abort_timezone", + "output": "time abort timezone config.toml: Time zone for time_abort.: Any time zone in format accepted by datetime.strptime." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time abort timezone", + "output": "time abort timezone config.toml: Time zone for time_abort.: Any time zone in format accepted by datetime.strptime." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Time zone for time_abort.: ", + "output": "time abort timezone config.toml: Time zone for time_abort.: Any time zone in format accepted by datetime.strptime." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting time_abort_timezone", + "output": "time abort timezone config.toml: Any time zone in format accepted by datetime.strptime." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting time_abort_timezone", + "output": "time abort timezone config.toml: Time zone for time_abort.: Any time zone in format accepted by datetime.strptime." + }, + { + "prompt_type": "plain", + "instruction": ": What does delete_model_dirs_and_files do? : delete model dirs and files config.toml: Whether to delete all directories and files matching experiment pattern when call do_delete_model (True), or whether to just delete directories (False). False can be used to preserve experiment logs that do not take up much space. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain delete_model_dirs_and_files. : delete model dirs and files config.toml: Whether to delete all directories and files matching experiment pattern when call do_delete_model (True), or whether to just delete directories (False). False can be used to preserve experiment logs that do not take up much space. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "delete_model_dirs_and_files", + "output": "delete model dirs and files config.toml: Whether to delete all directories and files matching experiment pattern when call do_delete_model (True), or whether to just delete directories (False). False can be used to preserve experiment logs that do not take up much space. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "delete_model_dirs_and_files", + "output": "delete model dirs and files config.toml: Whether to delete all directories and files matching experiment pattern when call do_delete_model (True), or whether to just delete directories (False). False can be used to preserve experiment logs that do not take up much space. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "delete model dirs and files", + "output": "delete model dirs and files config.toml: Whether to delete all directories and files matching experiment pattern when call do_delete_model (True), or whether to just delete directories (False). False can be used to preserve experiment logs that do not take up much space. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "delete model dirs and files config.toml: Whether to delete all directories and files matching experiment pattern when call do_delete_model (True), or whether to just delete directories (False). False can be used to preserve experiment logs that do not take up much space. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting delete_model_dirs_and_files", + "output": "delete model dirs and files config.toml: Whether to delete all directories and files matching experiment pattern when call do_delete_model (True), or whether to just delete directories (False). False can be used to preserve experiment logs that do not take up much space. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting delete_model_dirs_and_files", + "output": "delete model dirs and files config.toml: Whether to delete all directories and files matching experiment pattern when call do_delete_model (True), or whether to just delete directories (False). False can be used to preserve experiment logs that do not take up much space. " + }, + { + "prompt_type": "plain", + "instruction": ": What does delete_data_dirs_and_files do? 
: delete data dirs and files config.toml: Whether to delete all directories and files matching dataset pattern when call do_delete_dataset (True), or whether to just delete directories (False). False can be used to preserve dataset logs that do not take up much space. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain delete_data_dirs_and_files. : delete data dirs and files config.toml: Whether to delete all directories and files matching dataset pattern when call do_delete_dataset (True), or whether to just delete directories (False). False can be used to preserve dataset logs that do not take up much space. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "delete_data_dirs_and_files", + "output": "delete data dirs and files config.toml: Whether to delete all directories and files matching dataset pattern when call do_delete_dataset (True), or whether to just delete directories (False). False can be used to preserve dataset logs that do not take up much space. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "delete_data_dirs_and_files", + "output": "delete data dirs and files config.toml: Whether to delete all directories and files matching dataset pattern when call do_delete_dataset (True), or whether to just delete directories (False). False can be used to preserve dataset logs that do not take up much space. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "delete data dirs and files", + "output": "delete data dirs and files config.toml: Whether to delete all directories and files matching dataset pattern when call do_delete_dataset (True), or whether to just delete directories (False). False can be used to preserve dataset logs that do not take up much space. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "delete data dirs and files config.toml: Whether to delete all directories and files matching dataset pattern when call do_delete_dataset (True), or whether to just delete directories (False). False can be used to preserve dataset logs that do not take up much space. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting delete_data_dirs_and_files", + "output": "delete data dirs and files config.toml: Whether to delete all directories and files matching dataset pattern when call do_delete_dataset (True), or whether to just delete directories (False). False can be used to preserve dataset logs that do not take up much space. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting delete_data_dirs_and_files", + "output": "delete data dirs and files config.toml: Whether to delete all directories and files matching dataset pattern when call do_delete_dataset (True), or whether to just delete directories (False). False can be used to preserve dataset logs that do not take up much space. " + }, + { + "prompt_type": "plain", + "instruction": ": What does recipe do? 
: recipe config.toml: # Recipe type## Recipes override any GUI settings- **'auto'**: all models and features automatically determined by experiment settings, toml settings, and feature_engineering_effort- **'compliant'** : like 'auto' except: - *interpretability=10* (to avoid complexity, overrides GUI or python client chose for interpretability) - *enable_glm='on'* (rest 'off', to avoid complexity and be compatible with algorithms supported by MLI) - *fixed_ensemble_level=0*: Don't use any ensemble - *feature_brain_level=0*(: No feature brain used (to ensure every restart is identical) - *max_feature_interaction_depth=1*: interaction depth is set to 1 (no multi-feature interactions to avoid complexity) - *target_transformer='identity'*: for regression (to avoid complexity) - *check_distribution_shift_drop='off'*: Don't use distribution shift between train, valid, and test to drop features (bit risky without fine-tuning)- **'monotonic_gbm'** : like 'auto' except: - *monotonicity_constraints_interpretability_switch=1*: enable monotonicity constraints - *self.config.monotonicity_constraints_correlation_threshold = 0.01*: see below - *monotonicity_constraints_drop_low_correlation_features=true*: drop features that aren't correlated with target by at least 0.01 (specified by parameter above) - *fixed_ensemble_level=0*: Don't use any ensemble (to avoid complexity) - *included_models=['LightGBMModel']* - *included_transformers=['OriginalTransformer']*: only original (numeric) features will be used - *feature_brain_level=0*: No feature brain used (to ensure every restart is identical) - *monotonicity_constraints_log_level='high'* - *autodoc_pd_max_runtime=-1*: no timeout for PDP creation in AutoDoc- **'kaggle'** : like 'auto' except: - external validation set is concatenated with train set, with target marked as missing - test set is concatenated with train set, with target marked as missing - transformers that do not use the target are allowed to fit_transform across 
entire train + validation + test - several config toml expert options open-up limits (e.g. more numerics are treated as categoricals) - Note: If plentiful memory, can: - choose kaggle mode and then change fixed_feature_interaction_depth to large negative number, otherwise default number of features given to transformer is limited to 50 by default - choose mutation_mode = \"full\", so even more types are transformations are done at once per transformer- **'nlp_model'**: Only enables NLP models that process pure text- **'nlp_transformer'**: Only enables NLP transformers that process pure text, while any model type is allowed- **'image_model'**: Only enables Image models that process pure images- **'image_transformer'**: Only enables Image transformers that process pure images, while any model type is allowed- **'unsupervised'**: Only enables unsupervised transformers, models and scorers- **'gpus_max'**: Maximize use of GPUs (e.g. use XGBoost, rapids, Optuna hyperparameter search, etc.)- **'more_overfit_protection'**: Potentially improve overfit, esp. for small data, by disabling target encoding and making GA behave like final model for tree counts and learning rate- **'feature_store_mojo'**: Creates a MOJO to be used as transformer in the H2O Feature Store, to augment data on a row-by-row level based on Driverless AI's feature engineering. Only includes transformers that don't depend on the target, since features like target encoding need to be created at model fitting time to avoid data leakage. And features like lags need to be created from the raw data, they can't be computed with a row-by-row MOJO transformer.Each pipeline building recipe mode can be chosen, and then fine-tuned using each expert settings. 
Changing thepipeline building recipe will reset all pipeline building recipe options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of pipeline buildingrecipe rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, the recipe rules are not re-appliedand any fine-tuning is preserved. To reset recipe behavior, one can switch between 'auto' and the desired mode. Thisway the new child experiment will use the default settings for the chosen recipe." + }, + { + "prompt_type": "plain", + "instruction": ": Explain recipe. : recipe config.toml: # Recipe type## Recipes override any GUI settings- **'auto'**: all models and features automatically determined by experiment settings, toml settings, and feature_engineering_effort- **'compliant'** : like 'auto' except: - *interpretability=10* (to avoid complexity, overrides GUI or python client chose for interpretability) - *enable_glm='on'* (rest 'off', to avoid complexity and be compatible with algorithms supported by MLI) - *fixed_ensemble_level=0*: Don't use any ensemble - *feature_brain_level=0*(: No feature brain used (to ensure every restart is identical) - *max_feature_interaction_depth=1*: interaction depth is set to 1 (no multi-feature interactions to avoid complexity) - *target_transformer='identity'*: for regression (to avoid complexity) - *check_distribution_shift_drop='off'*: Don't use distribution shift between train, valid, and test to drop features (bit risky without fine-tuning)- **'monotonic_gbm'** : like 'auto' except: - *monotonicity_constraints_interpretability_switch=1*: enable monotonicity constraints - *self.config.monotonicity_constraints_correlation_threshold = 0.01*: see below - *monotonicity_constraints_drop_low_correlation_features=true*: drop features that aren't correlated with target by at least 0.01 (specified by parameter above) - *fixed_ensemble_level=0*: Don't use any ensemble 
(to avoid complexity) - *included_models=['LightGBMModel']* - *included_transformers=['OriginalTransformer']*: only original (numeric) features will be used - *feature_brain_level=0*: No feature brain used (to ensure every restart is identical) - *monotonicity_constraints_log_level='high'* - *autodoc_pd_max_runtime=-1*: no timeout for PDP creation in AutoDoc- **'kaggle'** : like 'auto' except: - external validation set is concatenated with train set, with target marked as missing - test set is concatenated with train set, with target marked as missing - transformers that do not use the target are allowed to fit_transform across entire train + validation + test - several config toml expert options open-up limits (e.g. more numerics are treated as categoricals) - Note: If plentiful memory, can: - choose kaggle mode and then change fixed_feature_interaction_depth to large negative number, otherwise default number of features given to transformer is limited to 50 by default - choose mutation_mode = \"full\", so even more types are transformations are done at once per transformer- **'nlp_model'**: Only enables NLP models that process pure text- **'nlp_transformer'**: Only enables NLP transformers that process pure text, while any model type is allowed- **'image_model'**: Only enables Image models that process pure images- **'image_transformer'**: Only enables Image transformers that process pure images, while any model type is allowed- **'unsupervised'**: Only enables unsupervised transformers, models and scorers- **'gpus_max'**: Maximize use of GPUs (e.g. use XGBoost, rapids, Optuna hyperparameter search, etc.)- **'more_overfit_protection'**: Potentially improve overfit, esp. for small data, by disabling target encoding and making GA behave like final model for tree counts and learning rate- **'feature_store_mojo'**: Creates a MOJO to be used as transformer in the H2O Feature Store, to augment data on a row-by-row level based on Driverless AI's feature engineering. 
Only includes transformers that don't depend on the target, since features like target encoding need to be created at model fitting time to avoid data leakage. And features like lags need to be created from the raw data, they can't be computed with a row-by-row MOJO transformer.Each pipeline building recipe mode can be chosen, and then fine-tuned using each expert settings. Changing thepipeline building recipe will reset all pipeline building recipe options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of pipeline buildingrecipe rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, the recipe rules are not re-appliedand any fine-tuning is preserved. To reset recipe behavior, one can switch between 'auto' and the desired mode. Thisway the new child experiment will use the default settings for the chosen recipe." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Pipeline Building Recipe: . 
: Set the recipe config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "recipe", + "output": "recipe config.toml: # Recipe type## Recipes override any GUI settings- **'auto'**: all models and features automatically determined by experiment settings, toml settings, and feature_engineering_effort- **'compliant'** : like 'auto' except: - *interpretability=10* (to avoid complexity, overrides GUI or python client chose for interpretability) - *enable_glm='on'* (rest 'off', to avoid complexity and be compatible with algorithms supported by MLI) - *fixed_ensemble_level=0*: Don't use any ensemble - *feature_brain_level=0*(: No feature brain used (to ensure every restart is identical) - *max_feature_interaction_depth=1*: interaction depth is set to 1 (no multi-feature interactions to avoid complexity) - *target_transformer='identity'*: for regression (to avoid complexity) - *check_distribution_shift_drop='off'*: Don't use distribution shift between train, valid, and test to drop features (bit risky without fine-tuning)- **'monotonic_gbm'** : like 'auto' except: - *monotonicity_constraints_interpretability_switch=1*: enable monotonicity constraints - *self.config.monotonicity_constraints_correlation_threshold = 0.01*: see below - *monotonicity_constraints_drop_low_correlation_features=true*: drop features that aren't correlated with target by at least 0.01 (specified by parameter above) - *fixed_ensemble_level=0*: Don't use any ensemble (to avoid complexity) - *included_models=['LightGBMModel']* - *included_transformers=['OriginalTransformer']*: only original (numeric) features will be used - *feature_brain_level=0*: No feature brain used (to ensure every restart is identical) - *monotonicity_constraints_log_level='high'* - *autodoc_pd_max_runtime=-1*: no timeout for PDP creation in AutoDoc- **'kaggle'** : like 'auto' except: - external validation set is concatenated with train set, with 
target marked as missing - test set is concatenated with train set, with target marked as missing - transformers that do not use the target are allowed to fit_transform across entire train + validation + test - several config toml expert options open-up limits (e.g. more numerics are treated as categoricals) - Note: If plentiful memory, can: - choose kaggle mode and then change fixed_feature_interaction_depth to large negative number, otherwise default number of features given to transformer is limited to 50 by default - choose mutation_mode = \"full\", so even more types are transformations are done at once per transformer- **'nlp_model'**: Only enables NLP models that process pure text- **'nlp_transformer'**: Only enables NLP transformers that process pure text, while any model type is allowed- **'image_model'**: Only enables Image models that process pure images- **'image_transformer'**: Only enables Image transformers that process pure images, while any model type is allowed- **'unsupervised'**: Only enables unsupervised transformers, models and scorers- **'gpus_max'**: Maximize use of GPUs (e.g. use XGBoost, rapids, Optuna hyperparameter search, etc.)- **'more_overfit_protection'**: Potentially improve overfit, esp. for small data, by disabling target encoding and making GA behave like final model for tree counts and learning rate- **'feature_store_mojo'**: Creates a MOJO to be used as transformer in the H2O Feature Store, to augment data on a row-by-row level based on Driverless AI's feature engineering. Only includes transformers that don't depend on the target, since features like target encoding need to be created at model fitting time to avoid data leakage. And features like lags need to be created from the raw data, they can't be computed with a row-by-row MOJO transformer.Each pipeline building recipe mode can be chosen, and then fine-tuned using each expert settings. 
Changing thepipeline building recipe will reset all pipeline building recipe options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of pipeline buildingrecipe rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, the recipe rules are not re-appliedand any fine-tuning is preserved. To reset recipe behavior, one can switch between 'auto' and the desired mode. Thisway the new child experiment will use the default settings for the chosen recipe." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "recipe", + "output": "recipe config.toml: Pipeline Building Recipe: # Recipe type## Recipes override any GUI settings- **'auto'**: all models and features automatically determined by experiment settings, toml settings, and feature_engineering_effort- **'compliant'** : like 'auto' except: - *interpretability=10* (to avoid complexity, overrides GUI or python client chose for interpretability) - *enable_glm='on'* (rest 'off', to avoid complexity and be compatible with algorithms supported by MLI) - *fixed_ensemble_level=0*: Don't use any ensemble - *feature_brain_level=0*(: No feature brain used (to ensure every restart is identical) - *max_feature_interaction_depth=1*: interaction depth is set to 1 (no multi-feature interactions to avoid complexity) - *target_transformer='identity'*: for regression (to avoid complexity) - *check_distribution_shift_drop='off'*: Don't use distribution shift between train, valid, and test to drop features (bit risky without fine-tuning)- **'monotonic_gbm'** : like 'auto' except: - *monotonicity_constraints_interpretability_switch=1*: enable monotonicity constraints - *self.config.monotonicity_constraints_correlation_threshold = 0.01*: see below - *monotonicity_constraints_drop_low_correlation_features=true*: drop features that aren't correlated with 
target by at least 0.01 (specified by parameter above) - *fixed_ensemble_level=0*: Don't use any ensemble (to avoid complexity) - *included_models=['LightGBMModel']* - *included_transformers=['OriginalTransformer']*: only original (numeric) features will be used - *feature_brain_level=0*: No feature brain used (to ensure every restart is identical) - *monotonicity_constraints_log_level='high'* - *autodoc_pd_max_runtime=-1*: no timeout for PDP creation in AutoDoc- **'kaggle'** : like 'auto' except: - external validation set is concatenated with train set, with target marked as missing - test set is concatenated with train set, with target marked as missing - transformers that do not use the target are allowed to fit_transform across entire train + validation + test - several config toml expert options open-up limits (e.g. more numerics are treated as categoricals) - Note: If plentiful memory, can: - choose kaggle mode and then change fixed_feature_interaction_depth to large negative number, otherwise default number of features given to transformer is limited to 50 by default - choose mutation_mode = \"full\", so even more types are transformations are done at once per transformer- **'nlp_model'**: Only enables NLP models that process pure text- **'nlp_transformer'**: Only enables NLP transformers that process pure text, while any model type is allowed- **'image_model'**: Only enables Image models that process pure images- **'image_transformer'**: Only enables Image transformers that process pure images, while any model type is allowed- **'unsupervised'**: Only enables unsupervised transformers, models and scorers- **'gpus_max'**: Maximize use of GPUs (e.g. use XGBoost, rapids, Optuna hyperparameter search, etc.)- **'more_overfit_protection'**: Potentially improve overfit, esp. 
for small data, by disabling target encoding and making GA behave like final model for tree counts and learning rate- **'feature_store_mojo'**: Creates a MOJO to be used as transformer in the H2O Feature Store, to augment data on a row-by-row level based on Driverless AI's feature engineering. Only includes transformers that don't depend on the target, since features like target encoding need to be created at model fitting time to avoid data leakage. And features like lags need to be created from the raw data, they can't be computed with a row-by-row MOJO transformer.Each pipeline building recipe mode can be chosen, and then fine-tuned using each expert settings. Changing thepipeline building recipe will reset all pipeline building recipe options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of pipeline buildingrecipe rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, the recipe rules are not re-appliedand any fine-tuning is preserved. To reset recipe behavior, one can switch between 'auto' and the desired mode. Thisway the new child experiment will use the default settings for the chosen recipe." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "recipe", + "output": "recipe config.toml: Pipeline Building Recipe: # Recipe type## Recipes override any GUI settings- **'auto'**: all models and features automatically determined by experiment settings, toml settings, and feature_engineering_effort- **'compliant'** : like 'auto' except: - *interpretability=10* (to avoid complexity, overrides GUI or python client chose for interpretability) - *enable_glm='on'* (rest 'off', to avoid complexity and be compatible with algorithms supported by MLI) - *fixed_ensemble_level=0*: Don't use any ensemble - *feature_brain_level=0*(: No feature brain used (to ensure every restart is identical) - *max_feature_interaction_depth=1*: interaction depth is set to 1 (no multi-feature interactions to avoid complexity) - *target_transformer='identity'*: for regression (to avoid complexity) - *check_distribution_shift_drop='off'*: Don't use distribution shift between train, valid, and test to drop features (bit risky without fine-tuning)- **'monotonic_gbm'** : like 'auto' except: - *monotonicity_constraints_interpretability_switch=1*: enable monotonicity constraints - *self.config.monotonicity_constraints_correlation_threshold = 0.01*: see below - *monotonicity_constraints_drop_low_correlation_features=true*: drop features that aren't correlated with target by at least 0.01 (specified by parameter above) - *fixed_ensemble_level=0*: Don't use any ensemble (to avoid complexity) - *included_models=['LightGBMModel']* - *included_transformers=['OriginalTransformer']*: only original (numeric) features will be used - *feature_brain_level=0*: No feature brain used (to ensure every restart is identical) - *monotonicity_constraints_log_level='high'* - *autodoc_pd_max_runtime=-1*: no timeout for PDP creation in AutoDoc- **'kaggle'** : like 'auto' except: - external validation set is concatenated with train set, with target 
marked as missing - test set is concatenated with train set, with target marked as missing - transformers that do not use the target are allowed to fit_transform across entire train + validation + test - several config toml expert options open-up limits (e.g. more numerics are treated as categoricals) - Note: If plentiful memory, can: - choose kaggle mode and then change fixed_feature_interaction_depth to large negative number, otherwise default number of features given to transformer is limited to 50 by default - choose mutation_mode = \"full\", so even more types are transformations are done at once per transformer- **'nlp_model'**: Only enables NLP models that process pure text- **'nlp_transformer'**: Only enables NLP transformers that process pure text, while any model type is allowed- **'image_model'**: Only enables Image models that process pure images- **'image_transformer'**: Only enables Image transformers that process pure images, while any model type is allowed- **'unsupervised'**: Only enables unsupervised transformers, models and scorers- **'gpus_max'**: Maximize use of GPUs (e.g. use XGBoost, rapids, Optuna hyperparameter search, etc.)- **'more_overfit_protection'**: Potentially improve overfit, esp. for small data, by disabling target encoding and making GA behave like final model for tree counts and learning rate- **'feature_store_mojo'**: Creates a MOJO to be used as transformer in the H2O Feature Store, to augment data on a row-by-row level based on Driverless AI's feature engineering. Only includes transformers that don't depend on the target, since features like target encoding need to be created at model fitting time to avoid data leakage. And features like lags need to be created from the raw data, they can't be computed with a row-by-row MOJO transformer.Each pipeline building recipe mode can be chosen, and then fine-tuned using each expert settings. 
Changing thepipeline building recipe will reset all pipeline building recipe options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of pipeline buildingrecipe rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, the recipe rules are not re-appliedand any fine-tuning is preserved. To reset recipe behavior, one can switch between 'auto' and the desired mode. Thisway the new child experiment will use the default settings for the chosen recipe." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Pipeline Building Recipe: ", + "output": "recipe config.toml: Pipeline Building Recipe: # Recipe type## Recipes override any GUI settings- **'auto'**: all models and features automatically determined by experiment settings, toml settings, and feature_engineering_effort- **'compliant'** : like 'auto' except: - *interpretability=10* (to avoid complexity, overrides GUI or python client chose for interpretability) - *enable_glm='on'* (rest 'off', to avoid complexity and be compatible with algorithms supported by MLI) - *fixed_ensemble_level=0*: Don't use any ensemble - *feature_brain_level=0*(: No feature brain used (to ensure every restart is identical) - *max_feature_interaction_depth=1*: interaction depth is set to 1 (no multi-feature interactions to avoid complexity) - *target_transformer='identity'*: for regression (to avoid complexity) - *check_distribution_shift_drop='off'*: Don't use distribution shift between train, valid, and test to drop features (bit risky without fine-tuning)- **'monotonic_gbm'** : like 'auto' except: - *monotonicity_constraints_interpretability_switch=1*: enable monotonicity constraints - *self.config.monotonicity_constraints_correlation_threshold = 0.01*: see below - *monotonicity_constraints_drop_low_correlation_features=true*: drop features that aren't 
correlated with target by at least 0.01 (specified by parameter above) - *fixed_ensemble_level=0*: Don't use any ensemble (to avoid complexity) - *included_models=['LightGBMModel']* - *included_transformers=['OriginalTransformer']*: only original (numeric) features will be used - *feature_brain_level=0*: No feature brain used (to ensure every restart is identical) - *monotonicity_constraints_log_level='high'* - *autodoc_pd_max_runtime=-1*: no timeout for PDP creation in AutoDoc- **'kaggle'** : like 'auto' except: - external validation set is concatenated with train set, with target marked as missing - test set is concatenated with train set, with target marked as missing - transformers that do not use the target are allowed to fit_transform across entire train + validation + test - several config toml expert options open-up limits (e.g. more numerics are treated as categoricals) - Note: If plentiful memory, can: - choose kaggle mode and then change fixed_feature_interaction_depth to large negative number, otherwise default number of features given to transformer is limited to 50 by default - choose mutation_mode = \"full\", so even more types are transformations are done at once per transformer- **'nlp_model'**: Only enables NLP models that process pure text- **'nlp_transformer'**: Only enables NLP transformers that process pure text, while any model type is allowed- **'image_model'**: Only enables Image models that process pure images- **'image_transformer'**: Only enables Image transformers that process pure images, while any model type is allowed- **'unsupervised'**: Only enables unsupervised transformers, models and scorers- **'gpus_max'**: Maximize use of GPUs (e.g. use XGBoost, rapids, Optuna hyperparameter search, etc.)- **'more_overfit_protection'**: Potentially improve overfit, esp. 
for small data, by disabling target encoding and making GA behave like final model for tree counts and learning rate- **'feature_store_mojo'**: Creates a MOJO to be used as transformer in the H2O Feature Store, to augment data on a row-by-row level based on Driverless AI's feature engineering. Only includes transformers that don't depend on the target, since features like target encoding need to be created at model fitting time to avoid data leakage. And features like lags need to be created from the raw data, they can't be computed with a row-by-row MOJO transformer.Each pipeline building recipe mode can be chosen, and then fine-tuned using each expert settings. Changing thepipeline building recipe will reset all pipeline building recipe options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of pipeline buildingrecipe rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, the recipe rules are not re-appliedand any fine-tuning is preserved. To reset recipe behavior, one can switch between 'auto' and the desired mode. Thisway the new child experiment will use the default settings for the chosen recipe." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting recipe", + "output": "recipe config.toml: # Recipe type## Recipes override any GUI settings- **'auto'**: all models and features automatically determined by experiment settings, toml settings, and feature_engineering_effort- **'compliant'** : like 'auto' except: - *interpretability=10* (to avoid complexity, overrides GUI or python client chose for interpretability) - *enable_glm='on'* (rest 'off', to avoid complexity and be compatible with algorithms supported by MLI) - *fixed_ensemble_level=0*: Don't use any ensemble - *feature_brain_level=0*(: No feature brain used (to ensure every restart is identical) - *max_feature_interaction_depth=1*: interaction depth is set to 1 (no multi-feature interactions to avoid complexity) - *target_transformer='identity'*: for regression (to avoid complexity) - *check_distribution_shift_drop='off'*: Don't use distribution shift between train, valid, and test to drop features (bit risky without fine-tuning)- **'monotonic_gbm'** : like 'auto' except: - *monotonicity_constraints_interpretability_switch=1*: enable monotonicity constraints - *self.config.monotonicity_constraints_correlation_threshold = 0.01*: see below - *monotonicity_constraints_drop_low_correlation_features=true*: drop features that aren't correlated with target by at least 0.01 (specified by parameter above) - *fixed_ensemble_level=0*: Don't use any ensemble (to avoid complexity) - *included_models=['LightGBMModel']* - *included_transformers=['OriginalTransformer']*: only original (numeric) features will be used - *feature_brain_level=0*: No feature brain used (to ensure every restart is identical) - *monotonicity_constraints_log_level='high'* - *autodoc_pd_max_runtime=-1*: no timeout for PDP creation in AutoDoc- **'kaggle'** : like 'auto' except: - external validation set is concatenated with train set, with target marked as missing - test set is concatenated 
with train set, with target marked as missing - transformers that do not use the target are allowed to fit_transform across entire train + validation + test - several config toml expert options open-up limits (e.g. more numerics are treated as categoricals) - Note: If plentiful memory, can: - choose kaggle mode and then change fixed_feature_interaction_depth to large negative number, otherwise default number of features given to transformer is limited to 50 by default - choose mutation_mode = \"full\", so even more types are transformations are done at once per transformer- **'nlp_model'**: Only enables NLP models that process pure text- **'nlp_transformer'**: Only enables NLP transformers that process pure text, while any model type is allowed- **'image_model'**: Only enables Image models that process pure images- **'image_transformer'**: Only enables Image transformers that process pure images, while any model type is allowed- **'unsupervised'**: Only enables unsupervised transformers, models and scorers- **'gpus_max'**: Maximize use of GPUs (e.g. use XGBoost, rapids, Optuna hyperparameter search, etc.)- **'more_overfit_protection'**: Potentially improve overfit, esp. for small data, by disabling target encoding and making GA behave like final model for tree counts and learning rate- **'feature_store_mojo'**: Creates a MOJO to be used as transformer in the H2O Feature Store, to augment data on a row-by-row level based on Driverless AI's feature engineering. Only includes transformers that don't depend on the target, since features like target encoding need to be created at model fitting time to avoid data leakage. And features like lags need to be created from the raw data, they can't be computed with a row-by-row MOJO transformer.Each pipeline building recipe mode can be chosen, and then fine-tuned using each expert settings. 
Changing thepipeline building recipe will reset all pipeline building recipe options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of pipeline buildingrecipe rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, the recipe rules are not re-appliedand any fine-tuning is preserved. To reset recipe behavior, one can switch between 'auto' and the desired mode. Thisway the new child experiment will use the default settings for the chosen recipe." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting recipe", + "output": "recipe config.toml: Pipeline Building Recipe: # Recipe type## Recipes override any GUI settings- **'auto'**: all models and features automatically determined by experiment settings, toml settings, and feature_engineering_effort- **'compliant'** : like 'auto' except: - *interpretability=10* (to avoid complexity, overrides GUI or python client chose for interpretability) - *enable_glm='on'* (rest 'off', to avoid complexity and be compatible with algorithms supported by MLI) - *fixed_ensemble_level=0*: Don't use any ensemble - *feature_brain_level=0*(: No feature brain used (to ensure every restart is identical) - *max_feature_interaction_depth=1*: interaction depth is set to 1 (no multi-feature interactions to avoid complexity) - *target_transformer='identity'*: for regression (to avoid complexity) - *check_distribution_shift_drop='off'*: Don't use distribution shift between train, valid, and test to drop features (bit risky without fine-tuning)- **'monotonic_gbm'** : like 'auto' except: - *monotonicity_constraints_interpretability_switch=1*: enable monotonicity constraints - *self.config.monotonicity_constraints_correlation_threshold = 0.01*: see below - *monotonicity_constraints_drop_low_correlation_features=true*: drop features that aren't correlated with target by at least 
0.01 (specified by parameter above) - *fixed_ensemble_level=0*: Don't use any ensemble (to avoid complexity) - *included_models=['LightGBMModel']* - *included_transformers=['OriginalTransformer']*: only original (numeric) features will be used - *feature_brain_level=0*: No feature brain used (to ensure every restart is identical) - *monotonicity_constraints_log_level='high'* - *autodoc_pd_max_runtime=-1*: no timeout for PDP creation in AutoDoc- **'kaggle'** : like 'auto' except: - external validation set is concatenated with train set, with target marked as missing - test set is concatenated with train set, with target marked as missing - transformers that do not use the target are allowed to fit_transform across entire train + validation + test - several config toml expert options open-up limits (e.g. more numerics are treated as categoricals) - Note: If plentiful memory, can: - choose kaggle mode and then change fixed_feature_interaction_depth to large negative number, otherwise default number of features given to transformer is limited to 50 by default - choose mutation_mode = \"full\", so even more types are transformations are done at once per transformer- **'nlp_model'**: Only enables NLP models that process pure text- **'nlp_transformer'**: Only enables NLP transformers that process pure text, while any model type is allowed- **'image_model'**: Only enables Image models that process pure images- **'image_transformer'**: Only enables Image transformers that process pure images, while any model type is allowed- **'unsupervised'**: Only enables unsupervised transformers, models and scorers- **'gpus_max'**: Maximize use of GPUs (e.g. use XGBoost, rapids, Optuna hyperparameter search, etc.)- **'more_overfit_protection'**: Potentially improve overfit, esp. 
for small data, by disabling target encoding and making GA behave like final model for tree counts and learning rate- **'feature_store_mojo'**: Creates a MOJO to be used as transformer in the H2O Feature Store, to augment data on a row-by-row level based on Driverless AI's feature engineering. Only includes transformers that don't depend on the target, since features like target encoding need to be created at model fitting time to avoid data leakage. And features like lags need to be created from the raw data, they can't be computed with a row-by-row MOJO transformer.Each pipeline building recipe mode can be chosen, and then fine-tuned using each expert settings. Changing thepipeline building recipe will reset all pipeline building recipe options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of pipeline buildingrecipe rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, the recipe rules are not re-appliedand any fine-tuning is preserved. To reset recipe behavior, one can switch between 'auto' and the desired mode. Thisway the new child experiment will use the default settings for the chosen recipe." + }, + { + "prompt_type": "plain", + "instruction": ": What does custom_unsupervised_expert_mode do? : custom unsupervised expert mode config.toml: Whether to treat model like UnsupervisedModel, so that one specifies each scorer, pretransformer, and transformer in expert panel like one would do for supervised experiments. Otherwise (False), custom unsupervised models will assume the model itself specified these. If the unsupervised model chosen has _included_transformers, _included_pretransformers, and _included_scorers selected, this should be set to False (default) else should be set to True. 
Then if one wants the unsupervised model to only produce 1 gene-transformer, then the custom unsupervised model can have: _ngenes_max = 1 _ngenes_max_by_layer = [1000, 1] The 1000 for the pretransformer layer just means that layer can have any number of genes. Choose 1 if you expect single instance of the pretransformer to be all one needs, e.g. consumes input features fully and produces complete useful output features. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain custom_unsupervised_expert_mode. : custom unsupervised expert mode config.toml: Whether to treat model like UnsupervisedModel, so that one specifies each scorer, pretransformer, and transformer in expert panel like one would do for supervised experiments. Otherwise (False), custom unsupervised models will assume the model itself specified these. If the unsupervised model chosen has _included_transformers, _included_pretransformers, and _included_scorers selected, this should be set to False (default) else should be set to True. Then if one wants the unsupervised model to only produce 1 gene-transformer, then the custom unsupervised model can have: _ngenes_max = 1 _ngenes_max_by_layer = [1000, 1] The 1000 for the pretransformer layer just means that layer can have any number of genes. Choose 1 if you expect single instance of the pretransformer to be all one needs, e.g. consumes input features fully and produces complete useful output features. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to treat custom unsupervised model like UnsupervisedModel: . 
: Set the custom unsupervised expert mode config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_unsupervised_expert_mode", + "output": "custom unsupervised expert mode config.toml: Whether to treat model like UnsupervisedModel, so that one specifies each scorer, pretransformer, and transformer in expert panel like one would do for supervised experiments. Otherwise (False), custom unsupervised models will assume the model itself specified these. If the unsupervised model chosen has _included_transformers, _included_pretransformers, and _included_scorers selected, this should be set to False (default) else should be set to True. Then if one wants the unsupervised model to only produce 1 gene-transformer, then the custom unsupervised model can have: _ngenes_max = 1 _ngenes_max_by_layer = [1000, 1] The 1000 for the pretransformer layer just means that layer can have any number of genes. Choose 1 if you expect single instance of the pretransformer to be all one needs, e.g. consumes input features fully and produces complete useful output features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_unsupervised_expert_mode", + "output": "custom unsupervised expert mode config.toml: Whether to treat custom unsupervised model like UnsupervisedModel: Whether to treat model like UnsupervisedModel, so that one specifies each scorer, pretransformer, and transformer in expert panel like one would do for supervised experiments. Otherwise (False), custom unsupervised models will assume the model itself specified these. If the unsupervised model chosen has _included_transformers, _included_pretransformers, and _included_scorers selected, this should be set to False (default) else should be set to True. 
Then if one wants the unsupervised model to only produce 1 gene-transformer, then the custom unsupervised model can have: _ngenes_max = 1 _ngenes_max_by_layer = [1000, 1] The 1000 for the pretransformer layer just means that layer can have any number of genes. Choose 1 if you expect single instance of the pretransformer to be all one needs, e.g. consumes input features fully and produces complete useful output features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom unsupervised expert mode", + "output": "custom unsupervised expert mode config.toml: Whether to treat custom unsupervised model like UnsupervisedModel: Whether to treat model like UnsupervisedModel, so that one specifies each scorer, pretransformer, and transformer in expert panel like one would do for supervised experiments. Otherwise (False), custom unsupervised models will assume the model itself specified these. If the unsupervised model chosen has _included_transformers, _included_pretransformers, and _included_scorers selected, this should be set to False (default) else should be set to True. Then if one wants the unsupervised model to only produce 1 gene-transformer, then the custom unsupervised model can have: _ngenes_max = 1 _ngenes_max_by_layer = [1000, 1] The 1000 for the pretransformer layer just means that layer can have any number of genes. Choose 1 if you expect single instance of the pretransformer to be all one needs, e.g. consumes input features fully and produces complete useful output features. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to treat custom unsupervised model like UnsupervisedModel: ", + "output": "custom unsupervised expert mode config.toml: Whether to treat custom unsupervised model like UnsupervisedModel: Whether to treat model like UnsupervisedModel, so that one specifies each scorer, pretransformer, and transformer in expert panel like one would do for supervised experiments. Otherwise (False), custom unsupervised models will assume the model itself specified these. If the unsupervised model chosen has _included_transformers, _included_pretransformers, and _included_scorers selected, this should be set to False (default) else should be set to True. Then if one wants the unsupervised model to only produce 1 gene-transformer, then the custom unsupervised model can have: _ngenes_max = 1 _ngenes_max_by_layer = [1000, 1] The 1000 for the pretransformer layer just means that layer can have any number of genes. Choose 1 if you expect single instance of the pretransformer to be all one needs, e.g. consumes input features fully and produces complete useful output features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting custom_unsupervised_expert_mode", + "output": "custom unsupervised expert mode config.toml: Whether to treat model like UnsupervisedModel, so that one specifies each scorer, pretransformer, and transformer in expert panel like one would do for supervised experiments. Otherwise (False), custom unsupervised models will assume the model itself specified these. If the unsupervised model chosen has _included_transformers, _included_pretransformers, and _included_scorers selected, this should be set to False (default) else should be set to True. 
Then if one wants the unsupervised model to only produce 1 gene-transformer, then the custom unsupervised model can have: _ngenes_max = 1 _ngenes_max_by_layer = [1000, 1] The 1000 for the pretransformer layer just means that layer can have any number of genes. Choose 1 if you expect single instance of the pretransformer to be all one needs, e.g. consumes input features fully and produces complete useful output features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting custom_unsupervised_expert_mode", + "output": "custom unsupervised expert mode config.toml: Whether to treat custom unsupervised model like UnsupervisedModel: Whether to treat model like UnsupervisedModel, so that one specifies each scorer, pretransformer, and transformer in expert panel like one would do for supervised experiments. Otherwise (False), custom unsupervised models will assume the model itself specified these. If the unsupervised model chosen has _included_transformers, _included_pretransformers, and _included_scorers selected, this should be set to False (default) else should be set to True. Then if one wants the unsupervised model to only produce 1 gene-transformer, then the custom unsupervised model can have: _ngenes_max = 1 _ngenes_max_by_layer = [1000, 1] The 1000 for the pretransformer layer just means that layer can have any number of genes. Choose 1 if you expect single instance of the pretransformer to be all one needs, e.g. consumes input features fully and produces complete useful output features. " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_genetic_algorithm do? 
: enable genetic algorithm config.toml: Whether to enable genetic algorithm for selection and hyper-parameter tuning of features and models.- If disabled ('off'), will go directly to final pipeline training (using default feature engineering and feature selection).- 'auto' is same as 'on' unless pure NLP or Image experiment.- \"Optuna\": Uses DAI genetic algorithm for feature engineering, but model hyperparameters are tuned with Optuna. - In the Optuna case, the scores shown in the iteration panel are the best score and trial scores. - Optuna mode currently only uses Optuna for XGBoost, LightGBM, and CatBoost (custom recipe). - If Pruner is enabled, as is default, Optuna mode disables mutations of eval_metric so pruning uses same metric across trials to compare properly.Currently does not supported when pre_transformers or multi-layer pipeline used, which must go through at least one round of tuning or evolution. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_genetic_algorithm. : enable genetic algorithm config.toml: Whether to enable genetic algorithm for selection and hyper-parameter tuning of features and models.- If disabled ('off'), will go directly to final pipeline training (using default feature engineering and feature selection).- 'auto' is same as 'on' unless pure NLP or Image experiment.- \"Optuna\": Uses DAI genetic algorithm for feature engineering, but model hyperparameters are tuned with Optuna. - In the Optuna case, the scores shown in the iteration panel are the best score and trial scores. - Optuna mode currently only uses Optuna for XGBoost, LightGBM, and CatBoost (custom recipe). - If Pruner is enabled, as is default, Optuna mode disables mutations of eval_metric so pruning uses same metric across trials to compare properly.Currently does not supported when pre_transformers or multi-layer pipeline used, which must go through at least one round of tuning or evolution. 
" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable genetic algorithm for selection and tuning of features and models: . : Set the enable genetic algorithm config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_genetic_algorithm", + "output": "enable genetic algorithm config.toml: Whether to enable genetic algorithm for selection and hyper-parameter tuning of features and models.- If disabled ('off'), will go directly to final pipeline training (using default feature engineering and feature selection).- 'auto' is same as 'on' unless pure NLP or Image experiment.- \"Optuna\": Uses DAI genetic algorithm for feature engineering, but model hyperparameters are tuned with Optuna. - In the Optuna case, the scores shown in the iteration panel are the best score and trial scores. - Optuna mode currently only uses Optuna for XGBoost, LightGBM, and CatBoost (custom recipe). - If Pruner is enabled, as is default, Optuna mode disables mutations of eval_metric so pruning uses same metric across trials to compare properly.Currently does not supported when pre_transformers or multi-layer pipeline used, which must go through at least one round of tuning or evolution. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_genetic_algorithm", + "output": "enable genetic algorithm config.toml: Enable genetic algorithm for selection and tuning of features and models: Whether to enable genetic algorithm for selection and hyper-parameter tuning of features and models.- If disabled ('off'), will go directly to final pipeline training (using default feature engineering and feature selection).- 'auto' is same as 'on' unless pure NLP or Image experiment.- \"Optuna\": Uses DAI genetic algorithm for feature engineering, but model hyperparameters are tuned with Optuna. 
- In the Optuna case, the scores shown in the iteration panel are the best score and trial scores. - Optuna mode currently only uses Optuna for XGBoost, LightGBM, and CatBoost (custom recipe). - If Pruner is enabled, as is default, Optuna mode disables mutations of eval_metric so pruning uses same metric across trials to compare properly.Currently does not supported when pre_transformers or multi-layer pipeline used, which must go through at least one round of tuning or evolution. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable genetic algorithm", + "output": "enable genetic algorithm config.toml: Enable genetic algorithm for selection and tuning of features and models: Whether to enable genetic algorithm for selection and hyper-parameter tuning of features and models.- If disabled ('off'), will go directly to final pipeline training (using default feature engineering and feature selection).- 'auto' is same as 'on' unless pure NLP or Image experiment.- \"Optuna\": Uses DAI genetic algorithm for feature engineering, but model hyperparameters are tuned with Optuna. - In the Optuna case, the scores shown in the iteration panel are the best score and trial scores. - Optuna mode currently only uses Optuna for XGBoost, LightGBM, and CatBoost (custom recipe). - If Pruner is enabled, as is default, Optuna mode disables mutations of eval_metric so pruning uses same metric across trials to compare properly.Currently does not supported when pre_transformers or multi-layer pipeline used, which must go through at least one round of tuning or evolution. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable genetic algorithm for selection and tuning of features and models: ", + "output": "enable genetic algorithm config.toml: Enable genetic algorithm for selection and tuning of features and models: Whether to enable genetic algorithm for selection and hyper-parameter tuning of features and models.- If disabled ('off'), will go directly to final pipeline training (using default feature engineering and feature selection).- 'auto' is same as 'on' unless pure NLP or Image experiment.- \"Optuna\": Uses DAI genetic algorithm for feature engineering, but model hyperparameters are tuned with Optuna. - In the Optuna case, the scores shown in the iteration panel are the best score and trial scores. - Optuna mode currently only uses Optuna for XGBoost, LightGBM, and CatBoost (custom recipe). - If Pruner is enabled, as is default, Optuna mode disables mutations of eval_metric so pruning uses same metric across trials to compare properly.Currently does not supported when pre_transformers or multi-layer pipeline used, which must go through at least one round of tuning or evolution. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_genetic_algorithm", + "output": "enable genetic algorithm config.toml: Whether to enable genetic algorithm for selection and hyper-parameter tuning of features and models.- If disabled ('off'), will go directly to final pipeline training (using default feature engineering and feature selection).- 'auto' is same as 'on' unless pure NLP or Image experiment.- \"Optuna\": Uses DAI genetic algorithm for feature engineering, but model hyperparameters are tuned with Optuna. - In the Optuna case, the scores shown in the iteration panel are the best score and trial scores. - Optuna mode currently only uses Optuna for XGBoost, LightGBM, and CatBoost (custom recipe). 
- If Pruner is enabled, as is default, Optuna mode disables mutations of eval_metric so pruning uses same metric across trials to compare properly.Currently does not supported when pre_transformers or multi-layer pipeline used, which must go through at least one round of tuning or evolution. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_genetic_algorithm", + "output": "enable genetic algorithm config.toml: Enable genetic algorithm for selection and tuning of features and models: Whether to enable genetic algorithm for selection and hyper-parameter tuning of features and models.- If disabled ('off'), will go directly to final pipeline training (using default feature engineering and feature selection).- 'auto' is same as 'on' unless pure NLP or Image experiment.- \"Optuna\": Uses DAI genetic algorithm for feature engineering, but model hyperparameters are tuned with Optuna. - In the Optuna case, the scores shown in the iteration panel are the best score and trial scores. - Optuna mode currently only uses Optuna for XGBoost, LightGBM, and CatBoost (custom recipe). - If Pruner is enabled, as is default, Optuna mode disables mutations of eval_metric so pruning uses same metric across trials to compare properly.Currently does not supported when pre_transformers or multi-layer pipeline used, which must go through at least one round of tuning or evolution. " + }, + { + "prompt_type": "plain", + "instruction": ": What does feature_engineering_effort do? : feature engineering effort config.toml: How much effort to spend on feature engineering (-1...10)Heuristic combination of various developer-level toml parameters-1 : auto (5, except 1 for wide data in order to limit engineering)0 : keep only numeric features, only model tuning during evolution1 : keep only numeric features and frequency-encoded categoricals, only model tuning during evolution2 : Like #1 but instead just no Text features. 
Some feature tuning before evolution.3 : Like #5 but only tuning during evolution. Mixed tuning of features and model parameters.4 : Like #5, but slightly more focused on model tuning5 : Default. Balanced feature-model tuning6-7 : Like #5, but slightly more focused on feature engineering8 : Like #6-7, but even more focused on feature engineering with high feature generation rate, no feature dropping even if high interpretability9-10: Like #8, but no model tuning during feature evolution " + }, + { + "prompt_type": "plain", + "instruction": ": Explain feature_engineering_effort. : feature engineering effort config.toml: How much effort to spend on feature engineering (-1...10)Heuristic combination of various developer-level toml parameters-1 : auto (5, except 1 for wide data in order to limit engineering)0 : keep only numeric features, only model tuning during evolution1 : keep only numeric features and frequency-encoded categoricals, only model tuning during evolution2 : Like #1 but instead just no Text features. Some feature tuning before evolution.3 : Like #5 but only tuning during evolution. Mixed tuning of features and model parameters.4 : Like #5, but slightly more focused on model tuning5 : Default. Balanced feature-model tuning6-7 : Like #5, but slightly more focused on feature engineering8 : Like #6-7, but even more focused on feature engineering with high feature generation rate, no feature dropping even if high interpretability9-10: Like #8, but no model tuning during feature evolution " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Feature engineering effort (0..10): . 
: Set the feature engineering effort config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_engineering_effort", + "output": "feature engineering effort config.toml: How much effort to spend on feature engineering (-1...10)Heuristic combination of various developer-level toml parameters-1 : auto (5, except 1 for wide data in order to limit engineering)0 : keep only numeric features, only model tuning during evolution1 : keep only numeric features and frequency-encoded categoricals, only model tuning during evolution2 : Like #1 but instead just no Text features. Some feature tuning before evolution.3 : Like #5 but only tuning during evolution. Mixed tuning of features and model parameters.4 : Like #5, but slightly more focused on model tuning5 : Default. Balanced feature-model tuning6-7 : Like #5, but slightly more focused on feature engineering8 : Like #6-7, but even more focused on feature engineering with high feature generation rate, no feature dropping even if high interpretability9-10: Like #8, but no model tuning during feature evolution " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_engineering_effort", + "output": "feature engineering effort config.toml: Feature engineering effort (0..10): How much effort to spend on feature engineering (-1...10)Heuristic combination of various developer-level toml parameters-1 : auto (5, except 1 for wide data in order to limit engineering)0 : keep only numeric features, only model tuning during evolution1 : keep only numeric features and frequency-encoded categoricals, only model tuning during evolution2 : Like #1 but instead just no Text features. Some feature tuning before evolution.3 : Like #5 but only tuning during evolution. Mixed tuning of features and model parameters.4 : Like #5, but slightly more focused on model tuning5 : Default. 
Balanced feature-model tuning6-7 : Like #5, but slightly more focused on feature engineering8 : Like #6-7, but even more focused on feature engineering with high feature generation rate, no feature dropping even if high interpretability9-10: Like #8, but no model tuning during feature evolution " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature engineering effort", + "output": "feature engineering effort config.toml: Feature engineering effort (0..10): How much effort to spend on feature engineering (-1...10)Heuristic combination of various developer-level toml parameters-1 : auto (5, except 1 for wide data in order to limit engineering)0 : keep only numeric features, only model tuning during evolution1 : keep only numeric features and frequency-encoded categoricals, only model tuning during evolution2 : Like #1 but instead just no Text features. Some feature tuning before evolution.3 : Like #5 but only tuning during evolution. Mixed tuning of features and model parameters.4 : Like #5, but slightly more focused on model tuning5 : Default. 
Balanced feature-model tuning6-7 : Like #5, but slightly more focused on feature engineering8 : Like #6-7, but even more focused on feature engineering with high feature generation rate, no feature dropping even if high interpretability9-10: Like #8, but no model tuning during feature evolution " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Feature engineering effort (0..10): ", + "output": "feature engineering effort config.toml: Feature engineering effort (0..10): How much effort to spend on feature engineering (-1...10)Heuristic combination of various developer-level toml parameters-1 : auto (5, except 1 for wide data in order to limit engineering)0 : keep only numeric features, only model tuning during evolution1 : keep only numeric features and frequency-encoded categoricals, only model tuning during evolution2 : Like #1 but instead just no Text features. Some feature tuning before evolution.3 : Like #5 but only tuning during evolution. Mixed tuning of features and model parameters.4 : Like #5, but slightly more focused on model tuning5 : Default. 
Balanced feature-model tuning6-7 : Like #5, but slightly more focused on feature engineering8 : Like #6-7, but even more focused on feature engineering with high feature generation rate, no feature dropping even if high interpretability9-10: Like #8, but no model tuning during feature evolution " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting feature_engineering_effort", + "output": "feature engineering effort config.toml: How much effort to spend on feature engineering (-1...10)Heuristic combination of various developer-level toml parameters-1 : auto (5, except 1 for wide data in order to limit engineering)0 : keep only numeric features, only model tuning during evolution1 : keep only numeric features and frequency-encoded categoricals, only model tuning during evolution2 : Like #1 but instead just no Text features. Some feature tuning before evolution.3 : Like #5 but only tuning during evolution. Mixed tuning of features and model parameters.4 : Like #5, but slightly more focused on model tuning5 : Default. 
Balanced feature-model tuning6-7 : Like #5, but slightly more focused on feature engineering8 : Like #6-7, but even more focused on feature engineering with high feature generation rate, no feature dropping even if high interpretability9-10: Like #8, but no model tuning during feature evolution " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting feature_engineering_effort", + "output": "feature engineering effort config.toml: Feature engineering effort (0..10): How much effort to spend on feature engineering (-1...10)Heuristic combination of various developer-level toml parameters-1 : auto (5, except 1 for wide data in order to limit engineering)0 : keep only numeric features, only model tuning during evolution1 : keep only numeric features and frequency-encoded categoricals, only model tuning during evolution2 : Like #1 but instead just no Text features. Some feature tuning before evolution.3 : Like #5 but only tuning during evolution. Mixed tuning of features and model parameters.4 : Like #5, but slightly more focused on model tuning5 : Default. Balanced feature-model tuning6-7 : Like #5, but slightly more focused on feature engineering8 : Like #6-7, but even more focused on feature engineering with high feature generation rate, no feature dropping even if high interpretability9-10: Like #8, but no model tuning during feature evolution " + }, + { + "prompt_type": "plain", + "instruction": ": What does check_distribution_shift do? : check distribution shift config.toml: Whether to enable train/valid and train/test distribution shift detection ('auto'/'on'/'off').By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain check_distribution_shift. 
: check distribution shift config.toml: Whether to enable train/valid and train/test distribution shift detection ('auto'/'on'/'off').By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Data distribution shift detection: . : Set the check distribution shift config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_distribution_shift", + "output": "check distribution shift config.toml: Whether to enable train/valid and train/test distribution shift detection ('auto'/'on'/'off').By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_distribution_shift", + "output": "check distribution shift config.toml: Data distribution shift detection: Whether to enable train/valid and train/test distribution shift detection ('auto'/'on'/'off').By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check distribution shift", + "output": "check distribution shift config.toml: Data distribution shift detection: Whether to enable train/valid and train/test distribution shift detection ('auto'/'on'/'off').By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Data distribution shift detection: ", + "output": "check distribution shift config.toml: Data distribution shift detection: Whether to enable train/valid and train/test distribution shift detection ('auto'/'on'/'off').By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting check_distribution_shift", + "output": "check distribution shift config.toml: Whether to enable train/valid and train/test distribution shift detection ('auto'/'on'/'off').By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting check_distribution_shift", + "output": "check distribution shift config.toml: Data distribution shift detection: Whether to enable train/valid and train/test distribution shift detection ('auto'/'on'/'off').By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. " + }, + { + "prompt_type": "plain", + "instruction": ": What does check_distribution_shift_transformed do? : check distribution shift transformed config.toml: Whether to enable train/test distribution shift detection ('auto'/'on'/'off') for final model transformed features.By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. 
" + }, + { + "prompt_type": "plain", + "instruction": ": Explain check_distribution_shift_transformed. : check distribution shift transformed config.toml: Whether to enable train/test distribution shift detection ('auto'/'on'/'off') for final model transformed features.By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Data distribution shift detection on transformed features: . : Set the check distribution shift transformed config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_distribution_shift_transformed", + "output": "check distribution shift transformed config.toml: Whether to enable train/test distribution shift detection ('auto'/'on'/'off') for final model transformed features.By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_distribution_shift_transformed", + "output": "check distribution shift transformed config.toml: Data distribution shift detection on transformed features: Whether to enable train/test distribution shift detection ('auto'/'on'/'off') for final model transformed features.By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check distribution shift transformed", + "output": "check distribution shift transformed config.toml: Data distribution shift detection on transformed features: Whether to enable train/test distribution shift detection ('auto'/'on'/'off') for final model transformed features.By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Data distribution shift detection on transformed features: ", + "output": "check distribution shift transformed config.toml: Data distribution shift detection on transformed features: Whether to enable train/test distribution shift detection ('auto'/'on'/'off') for final model transformed features.By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting check_distribution_shift_transformed", + "output": "check distribution shift transformed config.toml: Whether to enable train/test distribution shift detection ('auto'/'on'/'off') for final model transformed features.By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting check_distribution_shift_transformed", + "output": "check distribution shift transformed config.toml: Data distribution shift detection on transformed features: Whether to enable train/test distribution shift detection ('auto'/'on'/'off') for final model transformed features.By default, LightGBMModel is used for shift detection if possible, unless it is turned off in modelexpert panel, and then only the models selected in recipe list will be used. " + }, + { + "prompt_type": "plain", + "instruction": ": What does check_distribution_shift_drop do? : check distribution shift drop config.toml: Whether to drop high-shift features ('auto'/'on'/'off'). Auto disables for time series." + }, + { + "prompt_type": "plain", + "instruction": ": Explain check_distribution_shift_drop. : check distribution shift drop config.toml: Whether to drop high-shift features ('auto'/'on'/'off'). Auto disables for time series." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Data distribution shift detection drop of features: . : Set the check distribution shift drop config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_distribution_shift_drop", + "output": "check distribution shift drop config.toml: Whether to drop high-shift features ('auto'/'on'/'off'). Auto disables for time series." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_distribution_shift_drop", + "output": "check distribution shift drop config.toml: Data distribution shift detection drop of features: Whether to drop high-shift features ('auto'/'on'/'off'). Auto disables for time series." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check distribution shift drop", + "output": "check distribution shift drop config.toml: Data distribution shift detection drop of features: Whether to drop high-shift features ('auto'/'on'/'off'). Auto disables for time series." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Data distribution shift detection drop of features: ", + "output": "check distribution shift drop config.toml: Data distribution shift detection drop of features: Whether to drop high-shift features ('auto'/'on'/'off'). Auto disables for time series." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting check_distribution_shift_drop", + "output": "check distribution shift drop config.toml: Whether to drop high-shift features ('auto'/'on'/'off'). Auto disables for time series." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting check_distribution_shift_drop", + "output": "check distribution shift drop config.toml: Data distribution shift detection drop of features: Whether to drop high-shift features ('auto'/'on'/'off'). Auto disables for time series." + }, + { + "prompt_type": "plain", + "instruction": ": What does drop_features_distribution_shift_threshold_auc do? : drop features distribution shift threshold auc config.toml: If distribution shift detection is enabled, drop features (except ID, text, date/datetime, time, weight) forwhich shift AUC, GINI, or Spearman correlation is above this value(e.g. AUC of a binary classifier that predicts whether given feature valuebelongs to train or test data) " + }, + { + "prompt_type": "plain", + "instruction": ": Explain drop_features_distribution_shift_threshold_auc. 
: drop features distribution shift threshold auc config.toml: If distribution shift detection is enabled, drop features (except ID, text, date/datetime, time, weight) for which shift AUC, GINI, or Spearman correlation is above this value (e.g. AUC of a binary classifier that predicts whether given feature value belongs to train or test data) " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max allowed feature shift (AUC) before dropping feature: . : Set the drop features distribution shift threshold auc config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_features_distribution_shift_threshold_auc", + "output": "drop features distribution shift threshold auc config.toml: If distribution shift detection is enabled, drop features (except ID, text, date/datetime, time, weight) for which shift AUC, GINI, or Spearman correlation is above this value (e.g. AUC of a binary classifier that predicts whether given feature value belongs to train or test data) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_features_distribution_shift_threshold_auc", + "output": "drop features distribution shift threshold auc config.toml: Max allowed feature shift (AUC) before dropping feature: If distribution shift detection is enabled, drop features (except ID, text, date/datetime, time, weight) for which shift AUC, GINI, or Spearman correlation is above this value (e.g. 
AUC of a binary classifier that predicts whether given feature value belongs to train or test data) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop features distribution shift threshold auc", + "output": "drop features distribution shift threshold auc config.toml: Max allowed feature shift (AUC) before dropping feature: If distribution shift detection is enabled, drop features (except ID, text, date/datetime, time, weight) for which shift AUC, GINI, or Spearman correlation is above this value (e.g. AUC of a binary classifier that predicts whether given feature value belongs to train or test data) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max allowed feature shift (AUC) before dropping feature: ", + "output": "drop features distribution shift threshold auc config.toml: Max allowed feature shift (AUC) before dropping feature: If distribution shift detection is enabled, drop features (except ID, text, date/datetime, time, weight) for which shift AUC, GINI, or Spearman correlation is above this value (e.g. AUC of a binary classifier that predicts whether given feature value belongs to train or test data) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting drop_features_distribution_shift_threshold_auc", + "output": "drop features distribution shift threshold auc config.toml: If distribution shift detection is enabled, drop features (except ID, text, date/datetime, time, weight) for which shift AUC, GINI, or Spearman correlation is above this value (e.g. 
AUC of a binary classifier that predicts whether given feature value belongs to train or test data) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting drop_features_distribution_shift_threshold_auc", + "output": "drop features distribution shift threshold auc config.toml: Max allowed feature shift (AUC) before dropping feature: If distribution shift detection is enabled, drop features (except ID, text, date/datetime, time, weight) for which shift AUC, GINI, or Spearman correlation is above this value (e.g. AUC of a binary classifier that predicts whether given feature value belongs to train or test data) " + }, + { + "prompt_type": "plain", + "instruction": ": What does check_leakage do? : check leakage config.toml: Specify whether to check leakage for each feature (``on`` or ``off``). If a fold column is used, this option checks leakage without using the fold column. By default, LightGBM Model is used for leakage detection when possible, unless it is turned off in the Model Expert Settings tab, in which case only the models selected with the ``included_models`` option are used. Note that this option is always disabled for time series experiments. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain check_leakage. : check leakage config.toml: Specify whether to check leakage for each feature (``on`` or ``off``). If a fold column is used, this option checks leakage without using the fold column. By default, LightGBM Model is used for leakage detection when possible, unless it is turned off in the Model Expert Settings tab, in which case only the models selected with the ``included_models`` option are used. Note that this option is always disabled for time series experiments. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Leakage detection: . 
: Set the check leakage config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_leakage", + "output": "check leakage config.toml: Specify whether to check leakage for each feature (``on`` or ``off``). If a fold column is used, this option checks leakage without using the fold column. By default, LightGBM Model is used for leakage detection when possible, unless it is turned off in the Model Expert Settings tab, in which case only the models selected with the ``included_models`` option are used. Note that this option is always disabled for time series experiments. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_leakage", + "output": "check leakage config.toml: Leakage detection: Specify whether to check leakage for each feature (``on`` or ``off``). If a fold column is used, this option checks leakage without using the fold column. By default, LightGBM Model is used for leakage detection when possible, unless it is turned off in the Model Expert Settings tab, in which case only the models selected with the ``included_models`` option are used. Note that this option is always disabled for time series experiments. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check leakage", + "output": "check leakage config.toml: Leakage detection: Specify whether to check leakage for each feature (``on`` or ``off``). If a fold column is used, this option checks leakage without using the fold column. By default, LightGBM Model is used for leakage detection when possible, unless it is turned off in the Model Expert Settings tab, in which case only the models selected with the ``included_models`` option are used. Note that this option is always disabled for time series experiments. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Leakage detection: ", + "output": "check leakage config.toml: Leakage detection: Specify whether to check leakage for each feature (``on`` or ``off``). If a fold column is used, this option checks leakage without using the fold column. By default, LightGBM Model is used for leakage detection when possible, unless it is turned off in the Model Expert Settings tab, in which case only the models selected with the ``included_models`` option are used. Note that this option is always disabled for time series experiments. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting check_leakage", + "output": "check leakage config.toml: Specify whether to check leakage for each feature (``on`` or ``off``). If a fold column is used, this option checks leakage without using the fold column. By default, LightGBM Model is used for leakage detection when possible, unless it is turned off in the Model Expert Settings tab, in which case only the models selected with the ``included_models`` option are used. Note that this option is always disabled for time series experiments. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting check_leakage", + "output": "check leakage config.toml: Leakage detection: Specify whether to check leakage for each feature (``on`` or ``off``). If a fold column is used, this option checks leakage without using the fold column. By default, LightGBM Model is used for leakage detection when possible, unless it is turned off in the Model Expert Settings tab, in which case only the models selected with the ``included_models`` option are used. Note that this option is always disabled for time series experiments. " + }, + { + "prompt_type": "plain", + "instruction": ": What does drop_features_leakage_threshold_auc do? 
: drop features leakage threshold auc config.toml: If leakage detection is enabled, drop features for which AUC (R2 for regression), GINI, or Spearman correlation is above this value. If fold column present, features are not dropped, because leakage test applies without fold column used. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain drop_features_leakage_threshold_auc. : drop features leakage threshold auc config.toml: If leakage detection is enabled, drop features for which AUC (R2 for regression), GINI, or Spearman correlation is above this value. If fold column present, features are not dropped, because leakage test applies without fold column used. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Leakage detection dropping AUC/R2 threshold: . : Set the drop features leakage threshold auc config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_features_leakage_threshold_auc", + "output": "drop features leakage threshold auc config.toml: If leakage detection is enabled, drop features for which AUC (R2 for regression), GINI, or Spearman correlation is above this value. If fold column present, features are not dropped, because leakage test applies without fold column used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_features_leakage_threshold_auc", + "output": "drop features leakage threshold auc config.toml: Leakage detection dropping AUC/R2 threshold: If leakage detection is enabled, drop features for which AUC (R2 for regression), GINI, or Spearman correlation is above this value. If fold column present, features are not dropped, because leakage test applies without fold column used. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop features leakage threshold auc", + "output": "drop features leakage threshold auc config.toml: Leakage detection dropping AUC/R2 threshold: If leakage detection is enabled, drop features for which AUC (R2 for regression), GINI, or Spearman correlation is above this value. If fold column present, features are not dropped, because leakage test applies without fold column used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Leakage detection dropping AUC/R2 threshold: ", + "output": "drop features leakage threshold auc config.toml: Leakage detection dropping AUC/R2 threshold: If leakage detection is enabled, drop features for which AUC (R2 for regression), GINI, or Spearman correlation is above this value. If fold column present, features are not dropped, because leakage test applies without fold column used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting drop_features_leakage_threshold_auc", + "output": "drop features leakage threshold auc config.toml: If leakage detection is enabled, drop features for which AUC (R2 for regression), GINI, or Spearman correlation is above this value. If fold column present, features are not dropped, because leakage test applies without fold column used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting drop_features_leakage_threshold_auc", + "output": "drop features leakage threshold auc config.toml: Leakage detection dropping AUC/R2 threshold: If leakage detection is enabled, drop features for which AUC (R2 for regression), GINI, or Spearman correlation is above this value. If fold column present, features are not dropped, because leakage test applies without fold column used. 
" + }, + { + "prompt_type": "plain", + "instruction": ": What does leakage_max_data_size do? : leakage max data size config.toml: Max number of rows x number of columns to trigger (stratified) sampling for leakage checks " + }, + { + "prompt_type": "plain", + "instruction": ": Explain leakage_max_data_size. : leakage max data size config.toml: Max number of rows x number of columns to trigger (stratified) sampling for leakage checks " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max rows x columns for leakage: . : Set the leakage max data size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_max_data_size", + "output": "leakage max data size config.toml: Max number of rows x number of columns to trigger (stratified) sampling for leakage checks " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_max_data_size", + "output": "leakage max data size config.toml: Max rows x columns for leakage: Max number of rows x number of columns to trigger (stratified) sampling for leakage checks " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage max data size", + "output": "leakage max data size config.toml: Max rows x columns for leakage: Max number of rows x number of columns to trigger (stratified) sampling for leakage checks " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max rows x columns for leakage: ", + "output": "leakage max data size config.toml: Max rows x columns for leakage: Max number of rows x number of columns to trigger (stratified) sampling for leakage checks " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting leakage_max_data_size", + 
"output": "leakage max data size config.toml: Max number of rows x number of columns to trigger (stratified) sampling for leakage checks " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting leakage_max_data_size", + "output": "leakage max data size config.toml: Max rows x columns for leakage: Max number of rows x number of columns to trigger (stratified) sampling for leakage checks " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_features_importance do? : max features importance config.toml: Specify the maximum number of features to use and show in importance tables.When Interpretability is set higher than 1,transformed or original features with lower importance than the top max_features_importance features are always removed.Feature importances of transformed or original features correspondingly will be pruned.Higher values can lead to lower performance and larger disk space used for datasets with more than 100k columns. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_features_importance. : max features importance config.toml: Specify the maximum number of features to use and show in importance tables.When Interpretability is set higher than 1,transformed or original features with lower importance than the top max_features_importance features are always removed.Feature importances of transformed or original features correspondingly will be pruned.Higher values can lead to lower performance and larger disk space used for datasets with more than 100k columns. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. num. features for variable importance: . 
: Set the max features importance config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_features_importance", + "output": "max features importance config.toml: Specify the maximum number of features to use and show in importance tables.When Interpretability is set higher than 1,transformed or original features with lower importance than the top max_features_importance features are always removed.Feature importances of transformed or original features correspondingly will be pruned.Higher values can lead to lower performance and larger disk space used for datasets with more than 100k columns. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_features_importance", + "output": "max features importance config.toml: Max. num. features for variable importance: Specify the maximum number of features to use and show in importance tables.When Interpretability is set higher than 1,transformed or original features with lower importance than the top max_features_importance features are always removed.Feature importances of transformed or original features correspondingly will be pruned.Higher values can lead to lower performance and larger disk space used for datasets with more than 100k columns. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max features importance", + "output": "max features importance config.toml: Max. num. 
features for variable importance: Specify the maximum number of features to use and show in importance tables.When Interpretability is set higher than 1,transformed or original features with lower importance than the top max_features_importance features are always removed.Feature importances of transformed or original features correspondingly will be pruned.Higher values can lead to lower performance and larger disk space used for datasets with more than 100k columns. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. num. features for variable importance: ", + "output": "max features importance config.toml: Max. num. features for variable importance: Specify the maximum number of features to use and show in importance tables.When Interpretability is set higher than 1,transformed or original features with lower importance than the top max_features_importance features are always removed.Feature importances of transformed or original features correspondingly will be pruned.Higher values can lead to lower performance and larger disk space used for datasets with more than 100k columns. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_features_importance", + "output": "max features importance config.toml: Specify the maximum number of features to use and show in importance tables.When Interpretability is set higher than 1,transformed or original features with lower importance than the top max_features_importance features are always removed.Feature importances of transformed or original features correspondingly will be pruned.Higher values can lead to lower performance and larger disk space used for datasets with more than 100k columns. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_features_importance", + "output": "max features importance config.toml: Max. 
num. features for variable importance: Specify the maximum number of features to use and show in importance tables.When Interpretability is set higher than 1,transformed or original features with lower importance than the top max_features_importance features are always removed.Feature importances of transformed or original features correspondingly will be pruned.Higher values can lead to lower performance and larger disk space used for datasets with more than 100k columns. " + }, + { + "prompt_type": "plain", + "instruction": ": What does make_python_scoring_pipeline do? : make python scoring pipeline config.toml: Whether to create the Python scoring pipeline at the end of each experiment." + }, + { + "prompt_type": "plain", + "instruction": ": Explain make_python_scoring_pipeline. : make python scoring pipeline config.toml: Whether to create the Python scoring pipeline at the end of each experiment." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Make Python scoring pipeline: . : Set the make python scoring pipeline config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_python_scoring_pipeline", + "output": "make python scoring pipeline config.toml: Whether to create the Python scoring pipeline at the end of each experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_python_scoring_pipeline", + "output": "make python scoring pipeline config.toml: Make Python scoring pipeline: Whether to create the Python scoring pipeline at the end of each experiment." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make python scoring pipeline", + "output": "make python scoring pipeline config.toml: Make Python scoring pipeline: Whether to create the Python scoring pipeline at the end of each experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Make Python scoring pipeline: ", + "output": "make python scoring pipeline config.toml: Make Python scoring pipeline: Whether to create the Python scoring pipeline at the end of each experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting make_python_scoring_pipeline", + "output": "make python scoring pipeline config.toml: Whether to create the Python scoring pipeline at the end of each experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting make_python_scoring_pipeline", + "output": "make python scoring pipeline config.toml: Make Python scoring pipeline: Whether to create the Python scoring pipeline at the end of each experiment." + }, + { + "prompt_type": "plain", + "instruction": ": What does make_mojo_scoring_pipeline do? : make mojo scoring pipeline config.toml: Whether to create the MOJO scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain make_mojo_scoring_pipeline. : make mojo scoring pipeline config.toml: Whether to create the MOJO scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. 
" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Make MOJO scoring pipeline: . : Set the make mojo scoring pipeline config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_mojo_scoring_pipeline", + "output": "make mojo scoring pipeline config.toml: Whether to create the MOJO scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_mojo_scoring_pipeline", + "output": "make mojo scoring pipeline config.toml: Make MOJO scoring pipeline: Whether to create the MOJO scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make mojo scoring pipeline", + "output": "make mojo scoring pipeline config.toml: Make MOJO scoring pipeline: Whether to create the MOJO scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Make MOJO scoring pipeline: ", + "output": "make mojo scoring pipeline config.toml: Make MOJO scoring pipeline: Whether to create the MOJO scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). 
If set to \"on\", might need to drop some models,transformers or custom recipes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting make_mojo_scoring_pipeline", + "output": "make mojo scoring pipeline config.toml: Whether to create the MOJO scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting make_mojo_scoring_pipeline", + "output": "make mojo scoring pipeline config.toml: Make MOJO scoring pipeline: Whether to create the MOJO scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. " + }, + { + "prompt_type": "plain", + "instruction": ": What does make_triton_scoring_pipeline do? : make triton scoring pipeline config.toml: Whether to create a C++ MOJO based Triton scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. Requires make_mojo_scoring_pipeline != \"off\". " + }, + { + "prompt_type": "plain", + "instruction": ": Explain make_triton_scoring_pipeline. : make triton scoring pipeline config.toml: Whether to create a C++ MOJO based Triton scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. Requires make_mojo_scoring_pipeline != \"off\". " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Make Triton scoring pipeline: . 
: Set the make triton scoring pipeline config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_triton_scoring_pipeline", + "output": "make triton scoring pipeline config.toml: Whether to create a C++ MOJO based Triton scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. Requires make_mojo_scoring_pipeline != \"off\". " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_triton_scoring_pipeline", + "output": "make triton scoring pipeline config.toml: Make Triton scoring pipeline: Whether to create a C++ MOJO based Triton scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. Requires make_mojo_scoring_pipeline != \"off\". " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make triton scoring pipeline", + "output": "make triton scoring pipeline config.toml: Make Triton scoring pipeline: Whether to create a C++ MOJO based Triton scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. Requires make_mojo_scoring_pipeline != \"off\". 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Make Triton scoring pipeline: ", + "output": "make triton scoring pipeline config.toml: Make Triton scoring pipeline: Whether to create a C++ MOJO based Triton scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. Requires make_mojo_scoring_pipeline != \"off\". " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting make_triton_scoring_pipeline", + "output": "make triton scoring pipeline config.toml: Whether to create a C++ MOJO based Triton scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. Requires make_mojo_scoring_pipeline != \"off\". " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting make_triton_scoring_pipeline", + "output": "make triton scoring pipeline config.toml: Make Triton scoring pipeline: Whether to create a C++ MOJO based Triton scoring pipeline at the end of each experiment. If set to \"auto\", will attempt tocreate it if possible (without dropping capabilities). If set to \"on\", might need to drop some models,transformers or custom recipes. Requires make_mojo_scoring_pipeline != \"off\". " + }, + { + "prompt_type": "plain", + "instruction": ": What does auto_deploy_triton_scoring_pipeline do? 
: auto deploy triton scoring pipeline config.toml: Whether to automatically deploy the model to the Triton inference server at the end of each experiment.\"local\" will deploy to the local (built-in) Triton inference server to location specified by triton_model_repository_dir_local.\"remote\" will deploy to the remote Triton inference server to location provided by triton_host_remote (and optionally, triton_model_repository_dir_remote).\"off\" requires manual action (Deploy wizard or Python client or manual transfer of exported Triton directory from Deploy wizard) to deploy the model to Triton. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain auto_deploy_triton_scoring_pipeline. : auto deploy triton scoring pipeline config.toml: Whether to automatically deploy the model to the Triton inference server at the end of each experiment.\"local\" will deploy to the local (built-in) Triton inference server to location specified by triton_model_repository_dir_local.\"remote\" will deploy to the remote Triton inference server to location provided by triton_host_remote (and optionally, triton_model_repository_dir_remote).\"off\" requires manual action (Deploy wizard or Python client or manual transfer of exported Triton directory from Deploy wizard) to deploy the model to Triton. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to automatically deploy every model to built-in or remote Triton inference server.: . 
: Set the auto deploy triton scoring pipeline config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auto_deploy_triton_scoring_pipeline", + "output": "auto deploy triton scoring pipeline config.toml: Whether to automatically deploy the model to the Triton inference server at the end of each experiment.\"local\" will deploy to the local (built-in) Triton inference server to location specified by triton_model_repository_dir_local.\"remote\" will deploy to the remote Triton inference server to location provided by triton_host_remote (and optionally, triton_model_repository_dir_remote).\"off\" requires manual action (Deploy wizard or Python client or manual transfer of exported Triton directory from Deploy wizard) to deploy the model to Triton. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auto_deploy_triton_scoring_pipeline", + "output": "auto deploy triton scoring pipeline config.toml: Whether to automatically deploy every model to built-in or remote Triton inference server.: Whether to automatically deploy the model to the Triton inference server at the end of each experiment.\"local\" will deploy to the local (built-in) Triton inference server to location specified by triton_model_repository_dir_local.\"remote\" will deploy to the remote Triton inference server to location provided by triton_host_remote (and optionally, triton_model_repository_dir_remote).\"off\" requires manual action (Deploy wizard or Python client or manual transfer of exported Triton directory from Deploy wizard) to deploy the model to Triton. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auto deploy triton scoring pipeline", + "output": "auto deploy triton scoring pipeline config.toml: Whether to automatically deploy every model to built-in or remote Triton inference server.: Whether to automatically deploy the model to the Triton inference server at the end of each experiment.\"local\" will deploy to the local (built-in) Triton inference server to location specified by triton_model_repository_dir_local.\"remote\" will deploy to the remote Triton inference server to location provided by triton_host_remote (and optionally, triton_model_repository_dir_remote).\"off\" requires manual action (Deploy wizard or Python client or manual transfer of exported Triton directory from Deploy wizard) to deploy the model to Triton. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to automatically deploy every model to built-in or remote Triton inference server.: ", + "output": "auto deploy triton scoring pipeline config.toml: Whether to automatically deploy every model to built-in or remote Triton inference server.: Whether to automatically deploy the model to the Triton inference server at the end of each experiment.\"local\" will deploy to the local (built-in) Triton inference server to location specified by triton_model_repository_dir_local.\"remote\" will deploy to the remote Triton inference server to location provided by triton_host_remote (and optionally, triton_model_repository_dir_remote).\"off\" requires manual action (Deploy wizard or Python client or manual transfer of exported Triton directory from Deploy wizard) to deploy the model to Triton. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auto_deploy_triton_scoring_pipeline", + "output": "auto deploy triton scoring pipeline config.toml: Whether to automatically deploy the model to the Triton inference server at the end of each experiment.\"local\" will deploy to the local (built-in) Triton inference server to location specified by triton_model_repository_dir_local.\"remote\" will deploy to the remote Triton inference server to location provided by triton_host_remote (and optionally, triton_model_repository_dir_remote).\"off\" requires manual action (Deploy wizard or Python client or manual transfer of exported Triton directory from Deploy wizard) to deploy the model to Triton. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auto_deploy_triton_scoring_pipeline", + "output": "auto deploy triton scoring pipeline config.toml: Whether to automatically deploy every model to built-in or remote Triton inference server.: Whether to automatically deploy the model to the Triton inference server at the end of each experiment.\"local\" will deploy to the local (built-in) Triton inference server to location specified by triton_model_repository_dir_local.\"remote\" will deploy to the remote Triton inference server to location provided by triton_host_remote (and optionally, triton_model_repository_dir_remote).\"off\" requires manual action (Deploy wizard or Python client or manual transfer of exported Triton directory from Deploy wizard) to deploy the model to Triton. " + }, + { + "prompt_type": "plain", + "instruction": ": What does triton_dedup_local_tmp do? : triton dedup local tmp config.toml: Replace duplicate files inside the Triton tmp directory with hard links, to significantly reduce the used disk space for local Triton deployments." + }, + { + "prompt_type": "plain", + "instruction": ": Explain triton_dedup_local_tmp. 
: triton dedup local tmp config.toml: Replace duplicate files inside the Triton tmp directory with hard links, to significantly reduce the used disk space for local Triton deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_dedup_local_tmp", + "output": "triton dedup local tmp config.toml: Replace duplicate files inside the Triton tmp directory with hard links, to significantly reduce the used disk space for local Triton deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_dedup_local_tmp", + "output": "triton dedup local tmp config.toml: Replace duplicate files inside the Triton tmp directory with hard links, to significantly reduce the used disk space for local Triton deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton dedup local tmp", + "output": "triton dedup local tmp config.toml: Replace duplicate files inside the Triton tmp directory with hard links, to significantly reduce the used disk space for local Triton deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "triton dedup local tmp config.toml: Replace duplicate files inside the Triton tmp directory with hard links, to significantly reduce the used disk space for local Triton deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting triton_dedup_local_tmp", + "output": "triton dedup local tmp config.toml: Replace duplicate files inside the Triton tmp directory with hard links, to significantly reduce the used disk space for local Triton deployments." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting triton_dedup_local_tmp", + "output": "triton dedup local tmp config.toml: Replace duplicate files inside the Triton tmp directory with hard links, to significantly reduce the used disk space for local Triton deployments." + }, + { + "prompt_type": "plain", + "instruction": ": What does triton_mini_acceptance_test_local do? : triton mini acceptance test local config.toml: Test local Triton deployments during creation of MOJO pipeline. Requires enable_triton_server_local and make_triton_scoring_pipeline to be enabled." + }, + { + "prompt_type": "plain", + "instruction": ": Explain triton_mini_acceptance_test_local. : triton mini acceptance test local config.toml: Test local Triton deployments during creation of MOJO pipeline. Requires enable_triton_server_local and make_triton_scoring_pipeline to be enabled." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Test local Triton deployments during creation of MOJO pipeline.: . : Set the triton mini acceptance test local config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_mini_acceptance_test_local", + "output": "triton mini acceptance test local config.toml: Test local Triton deployments during creation of MOJO pipeline. Requires enable_triton_server_local and make_triton_scoring_pipeline to be enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_mini_acceptance_test_local", + "output": "triton mini acceptance test local config.toml: Test local Triton deployments during creation of MOJO pipeline.: Test local Triton deployments during creation of MOJO pipeline. Requires enable_triton_server_local and make_triton_scoring_pipeline to be enabled." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton mini acceptance test local", + "output": "triton mini acceptance test local config.toml: Test local Triton deployments during creation of MOJO pipeline.: Test local Triton deployments during creation of MOJO pipeline. Requires enable_triton_server_local and make_triton_scoring_pipeline to be enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Test local Triton deployments during creation of MOJO pipeline.: ", + "output": "triton mini acceptance test local config.toml: Test local Triton deployments during creation of MOJO pipeline.: Test local Triton deployments during creation of MOJO pipeline. Requires enable_triton_server_local and make_triton_scoring_pipeline to be enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting triton_mini_acceptance_test_local", + "output": "triton mini acceptance test local config.toml: Test local Triton deployments during creation of MOJO pipeline. Requires enable_triton_server_local and make_triton_scoring_pipeline to be enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting triton_mini_acceptance_test_local", + "output": "triton mini acceptance test local config.toml: Test local Triton deployments during creation of MOJO pipeline.: Test local Triton deployments during creation of MOJO pipeline. Requires enable_triton_server_local and make_triton_scoring_pipeline to be enabled." + }, + { + "prompt_type": "plain", + "instruction": ": What does triton_mini_acceptance_test_remote do? : triton mini acceptance test remote config.toml: Test remote Triton deployments during creation of MOJO pipeline. Requires triton_host_remote to be configured and make_triton_scoring_pipeline to be enabled." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain triton_mini_acceptance_test_remote. : triton mini acceptance test remote config.toml: Test remote Triton deployments during creation of MOJO pipeline. Requires triton_host_remote to be configured and make_triton_scoring_pipeline to be enabled." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Test remote Triton deployments during creation of MOJO pipeline.: . : Set the triton mini acceptance test remote config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_mini_acceptance_test_remote", + "output": "triton mini acceptance test remote config.toml: Test remote Triton deployments during creation of MOJO pipeline. Requires triton_host_remote to be configured and make_triton_scoring_pipeline to be enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_mini_acceptance_test_remote", + "output": "triton mini acceptance test remote config.toml: Test remote Triton deployments during creation of MOJO pipeline.: Test remote Triton deployments during creation of MOJO pipeline. Requires triton_host_remote to be configured and make_triton_scoring_pipeline to be enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton mini acceptance test remote", + "output": "triton mini acceptance test remote config.toml: Test remote Triton deployments during creation of MOJO pipeline.: Test remote Triton deployments during creation of MOJO pipeline. Requires triton_host_remote to be configured and make_triton_scoring_pipeline to be enabled." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Test remote Triton deployments during creation of MOJO pipeline.: ", + "output": "triton mini acceptance test remote config.toml: Test remote Triton deployments during creation of MOJO pipeline.: Test remote Triton deployments during creation of MOJO pipeline. Requires triton_host_remote to be configured and make_triton_scoring_pipeline to be enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting triton_mini_acceptance_test_remote", + "output": "triton mini acceptance test remote config.toml: Test remote Triton deployments during creation of MOJO pipeline. Requires triton_host_remote to be configured and make_triton_scoring_pipeline to be enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting triton_mini_acceptance_test_remote", + "output": "triton mini acceptance test remote config.toml: Test remote Triton deployments during creation of MOJO pipeline.: Test remote Triton deployments during creation of MOJO pipeline. Requires triton_host_remote to be configured and make_triton_scoring_pipeline to be enabled." + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_for_predictions_benchmark do? : mojo for predictions benchmark config.toml: Perform timing and accuracy benchmarks for Injected MOJO scoring vs Python scoring. This is for full scoring data, and can be slow. This also requires hard asserts. Doesn't force MOJO scoring by itself, so depends on mojo_for_predictions='on' if want full coverage." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_for_predictions_benchmark. : mojo for predictions benchmark config.toml: Perform timing and accuracy benchmarks for Injected MOJO scoring vs Python scoring. This is for full scoring data, and can be slow. This also requires hard asserts. 
Doesn't force MOJO scoring by itself, so depends on mojo_for_predictions='on' if want full coverage." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_for_predictions_benchmark", + "output": "mojo for predictions benchmark config.toml: Perform timing and accuracy benchmarks for Injected MOJO scoring vs Python scoring. This is for full scoring data, and can be slow. This also requires hard asserts. Doesn't force MOJO scoring by itself, so depends on mojo_for_predictions='on' if want full coverage." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_for_predictions_benchmark", + "output": "mojo for predictions benchmark config.toml: Perform timing and accuracy benchmarks for Injected MOJO scoring vs Python scoring. This is for full scoring data, and can be slow. This also requires hard asserts. Doesn't force MOJO scoring by itself, so depends on mojo_for_predictions='on' if want full coverage." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo for predictions benchmark", + "output": "mojo for predictions benchmark config.toml: Perform timing and accuracy benchmarks for Injected MOJO scoring vs Python scoring. This is for full scoring data, and can be slow. This also requires hard asserts. Doesn't force MOJO scoring by itself, so depends on mojo_for_predictions='on' if want full coverage." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mojo for predictions benchmark config.toml: Perform timing and accuracy benchmarks for Injected MOJO scoring vs Python scoring. This is for full scoring data, and can be slow. This also requires hard asserts. 
Doesn't force MOJO scoring by itself, so depends on mojo_for_predictions='on' if want full coverage." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mojo_for_predictions_benchmark", + "output": "mojo for predictions benchmark config.toml: Perform timing and accuracy benchmarks for Injected MOJO scoring vs Python scoring. This is for full scoring data, and can be slow. This also requires hard asserts. Doesn't force MOJO scoring by itself, so depends on mojo_for_predictions='on' if want full coverage." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_for_predictions_benchmark", + "output": "mojo for predictions benchmark config.toml: Perform timing and accuracy benchmarks for Injected MOJO scoring vs Python scoring. This is for full scoring data, and can be slow. This also requires hard asserts. Doesn't force MOJO scoring by itself, so depends on mojo_for_predictions='on' if want full coverage." + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_for_predictions_benchmark_slower_than_python_threshold do? : mojo for predictions benchmark slower than python threshold config.toml: Fail hard if MOJO scoring is this many times slower than Python scoring." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_for_predictions_benchmark_slower_than_python_threshold. : mojo for predictions benchmark slower than python threshold config.toml: Fail hard if MOJO scoring is this many times slower than Python scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_for_predictions_benchmark_slower_than_python_threshold", + "output": "mojo for predictions benchmark slower than python threshold config.toml: Fail hard if MOJO scoring is this many times slower than Python scoring." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_for_predictions_benchmark_slower_than_python_threshold", + "output": "mojo for predictions benchmark slower than python threshold config.toml: Fail hard if MOJO scoring is this many times slower than Python scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo for predictions benchmark slower than python threshold", + "output": "mojo for predictions benchmark slower than python threshold config.toml: Fail hard if MOJO scoring is this many times slower than Python scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mojo for predictions benchmark slower than python threshold config.toml: Fail hard if MOJO scoring is this many times slower than Python scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mojo_for_predictions_benchmark_slower_than_python_threshold", + "output": "mojo for predictions benchmark slower than python threshold config.toml: Fail hard if MOJO scoring is this many times slower than Python scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_for_predictions_benchmark_slower_than_python_threshold", + "output": "mojo for predictions benchmark slower than python threshold config.toml: Fail hard if MOJO scoring is this many times slower than Python scoring." + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_for_predictions_benchmark_slower_than_python_min_rows do? 
: mojo for predictions benchmark slower than python min rows config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if have at least this many rows. To reduce false positives." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_for_predictions_benchmark_slower_than_python_min_rows. : mojo for predictions benchmark slower than python min rows config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if have at least this many rows. To reduce false positives." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_for_predictions_benchmark_slower_than_python_min_rows", + "output": "mojo for predictions benchmark slower than python min rows config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if have at least this many rows. To reduce false positives." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_for_predictions_benchmark_slower_than_python_min_rows", + "output": "mojo for predictions benchmark slower than python min rows config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if have at least this many rows. To reduce false positives." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo for predictions benchmark slower than python min rows", + "output": "mojo for predictions benchmark slower than python min rows config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if have at least this many rows. To reduce false positives." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mojo for predictions benchmark slower than python min rows config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if have at least this many rows. To reduce false positives." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mojo_for_predictions_benchmark_slower_than_python_min_rows", + "output": "mojo for predictions benchmark slower than python min rows config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if have at least this many rows. To reduce false positives." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_for_predictions_benchmark_slower_than_python_min_rows", + "output": "mojo for predictions benchmark slower than python min rows config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if have at least this many rows. To reduce false positives." + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_for_predictions_benchmark_slower_than_python_min_seconds do? 
: mojo for predictions benchmark slower than python min seconds config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if takes at least this many seconds. To reduce false positives." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_for_predictions_benchmark_slower_than_python_min_seconds. : mojo for predictions benchmark slower than python min seconds config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if takes at least this many seconds. To reduce false positives." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_for_predictions_benchmark_slower_than_python_min_seconds", + "output": "mojo for predictions benchmark slower than python min seconds config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if takes at least this many seconds. To reduce false positives." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_for_predictions_benchmark_slower_than_python_min_seconds", + "output": "mojo for predictions benchmark slower than python min seconds config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if takes at least this many seconds. To reduce false positives." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo for predictions benchmark slower than python min seconds", + "output": "mojo for predictions benchmark slower than python min seconds config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if takes at least this many seconds. To reduce false positives." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mojo for predictions benchmark slower than python min seconds config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if takes at least this many seconds. To reduce false positives." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mojo_for_predictions_benchmark_slower_than_python_min_seconds", + "output": "mojo for predictions benchmark slower than python min seconds config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if takes at least this many seconds. To reduce false positives." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_for_predictions_benchmark_slower_than_python_min_seconds", + "output": "mojo for predictions benchmark slower than python min seconds config.toml: Fail hard if MOJO scoring is slower than Python scoring by a factor specified by mojo_for_predictions_benchmark_slower_than_python_threshold, but only if takes at least this many seconds. To reduce false positives." + }, + { + "prompt_type": "plain", + "instruction": ": What does inject_mojo_for_predictions do? 
: inject mojo for predictions config.toml: Inject MOJO into fitted Python state if mini acceptance test passes, so can use C++ MOJO runtime when calling predict(enable_mojo=True, IS_SCORER=True, ...). Prerequisite for mojo_for_predictions='on' or 'auto'." + }, + { + "prompt_type": "plain", + "instruction": ": Explain inject_mojo_for_predictions. : inject mojo for predictions config.toml: Inject MOJO into fitted Python state if mini acceptance test passes, so can use C++ MOJO runtime when calling predict(enable_mojo=True, IS_SCORER=True, ...). Prerequisite for mojo_for_predictions='on' or 'auto'." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "inject_mojo_for_predictions", + "output": "inject mojo for predictions config.toml: Inject MOJO into fitted Python state if mini acceptance test passes, so can use C++ MOJO runtime when calling predict(enable_mojo=True, IS_SCORER=True, ...). Prerequisite for mojo_for_predictions='on' or 'auto'." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "inject_mojo_for_predictions", + "output": "inject mojo for predictions config.toml: Inject MOJO into fitted Python state if mini acceptance test passes, so can use C++ MOJO runtime when calling predict(enable_mojo=True, IS_SCORER=True, ...). Prerequisite for mojo_for_predictions='on' or 'auto'." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "inject mojo for predictions", + "output": "inject mojo for predictions config.toml: Inject MOJO into fitted Python state if mini acceptance test passes, so can use C++ MOJO runtime when calling predict(enable_mojo=True, IS_SCORER=True, ...). Prerequisite for mojo_for_predictions='on' or 'auto'." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "inject mojo for predictions config.toml: Inject MOJO into fitted Python state if mini acceptance test passes, so can use C++ MOJO runtime when calling predict(enable_mojo=True, IS_SCORER=True, ...). Prerequisite for mojo_for_predictions='on' or 'auto'." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting inject_mojo_for_predictions", + "output": "inject mojo for predictions config.toml: Inject MOJO into fitted Python state if mini acceptance test passes, so can use C++ MOJO runtime when calling predict(enable_mojo=True, IS_SCORER=True, ...). Prerequisite for mojo_for_predictions='on' or 'auto'." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting inject_mojo_for_predictions", + "output": "inject mojo for predictions config.toml: Inject MOJO into fitted Python state if mini acceptance test passes, so can use C++ MOJO runtime when calling predict(enable_mojo=True, IS_SCORER=True, ...). Prerequisite for mojo_for_predictions='on' or 'auto'." + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_for_predictions do? : mojo for predictions config.toml: Use MOJO for making fast low-latency predictions after experiment has finished (when applicable, for AutoDoc/Diagnostics/Predictions/MLI and standalone Python scoring via scorer.zip). For 'auto', only use MOJO if number of rows is equal or below mojo_for_predictions_max_rows. For larger frames, it can be faster to use the Python backend since used libraries are more likely already vectorized." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_for_predictions. 
: mojo for predictions config.toml: Use MOJO for making fast low-latency predictions after experiment has finished (when applicable, for AutoDoc/Diagnostics/Predictions/MLI and standalone Python scoring via scorer.zip). For 'auto', only use MOJO if number of rows is equal or below mojo_for_predictions_max_rows. For larger frames, it can be faster to use the Python backend since used libraries are more likely already vectorized." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Allow use of MOJO for making predictions: . : Set the mojo for predictions config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_for_predictions", + "output": "mojo for predictions config.toml: Use MOJO for making fast low-latency predictions after experiment has finished (when applicable, for AutoDoc/Diagnostics/Predictions/MLI and standalone Python scoring via scorer.zip). For 'auto', only use MOJO if number of rows is equal or below mojo_for_predictions_max_rows. For larger frames, it can be faster to use the Python backend since used libraries are more likely already vectorized." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_for_predictions", + "output": "mojo for predictions config.toml: Allow use of MOJO for making predictions: Use MOJO for making fast low-latency predictions after experiment has finished (when applicable, for AutoDoc/Diagnostics/Predictions/MLI and standalone Python scoring via scorer.zip). For 'auto', only use MOJO if number of rows is equal or below mojo_for_predictions_max_rows. For larger frames, it can be faster to use the Python backend since used libraries are more likely already vectorized." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo for predictions", + "output": "mojo for predictions config.toml: Allow use of MOJO for making predictions: Use MOJO for making fast low-latency predictions after experiment has finished (when applicable, for AutoDoc/Diagnostics/Predictions/MLI and standalone Python scoring via scorer.zip). For 'auto', only use MOJO if number of rows is equal or below mojo_for_predictions_max_rows. For larger frames, it can be faster to use the Python backend since used libraries are more likely already vectorized." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Allow use of MOJO for making predictions: ", + "output": "mojo for predictions config.toml: Allow use of MOJO for making predictions: Use MOJO for making fast low-latency predictions after experiment has finished (when applicable, for AutoDoc/Diagnostics/Predictions/MLI and standalone Python scoring via scorer.zip). For 'auto', only use MOJO if number of rows is equal or below mojo_for_predictions_max_rows. For larger frames, it can be faster to use the Python backend since used libraries are more likely already vectorized." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mojo_for_predictions", + "output": "mojo for predictions config.toml: Use MOJO for making fast low-latency predictions after experiment has finished (when applicable, for AutoDoc/Diagnostics/Predictions/MLI and standalone Python scoring via scorer.zip). For 'auto', only use MOJO if number of rows is equal or below mojo_for_predictions_max_rows. For larger frames, it can be faster to use the Python backend since used libraries are more likely already vectorized." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_for_predictions", + "output": "mojo for predictions config.toml: Allow use of MOJO for making predictions: Use MOJO for making fast low-latency predictions after experiment has finished (when applicable, for AutoDoc/Diagnostics/Predictions/MLI and standalone Python scoring via scorer.zip). For 'auto', only use MOJO if number of rows is equal or below mojo_for_predictions_max_rows. For larger frames, it can be faster to use the Python backend since used libraries are more likely already vectorized." + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_for_predictions_max_rows do? : mojo for predictions max rows config.toml: For smaller datasets, the single-threaded but low latency C++ MOJO runtime can lead to significantly faster scoring times than the regular in-Driverless AI Python scoring environment. If enable_mojo=True is passed to the predict API, and the MOJO exists and is applicable, then use the MOJO runtime for datasets that have fewer or equal number of rows than this threshold. MLI/AutoDoc set enable_mojo=True by default, so this setting applies. This setting is only used if mojo_for_predictions is 'auto'." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_for_predictions_max_rows. : mojo for predictions max rows config.toml: For smaller datasets, the single-threaded but low latency C++ MOJO runtime can lead to significantly faster scoring times than the regular in-Driverless AI Python scoring environment. If enable_mojo=True is passed to the predict API, and the MOJO exists and is applicable, then use the MOJO runtime for datasets that have fewer or equal number of rows than this threshold. MLI/AutoDoc set enable_mojo=True by default, so this setting applies. This setting is only used if mojo_for_predictions is 'auto'." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max number of rows for C++ MOJO predictions: . : Set the mojo for predictions max rows config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_for_predictions_max_rows", + "output": "mojo for predictions max rows config.toml: For smaller datasets, the single-threaded but low latency C++ MOJO runtime can lead to significantly faster scoring times than the regular in-Driverless AI Python scoring environment. If enable_mojo=True is passed to the predict API, and the MOJO exists and is applicable, then use the MOJO runtime for datasets that have fewer or equal number of rows than this threshold. MLI/AutoDoc set enable_mojo=True by default, so this setting applies. This setting is only used if mojo_for_predictions is 'auto'." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_for_predictions_max_rows", + "output": "mojo for predictions max rows config.toml: Max number of rows for C++ MOJO predictions: For smaller datasets, the single-threaded but low latency C++ MOJO runtime can lead to significantly faster scoring times than the regular in-Driverless AI Python scoring environment. If enable_mojo=True is passed to the predict API, and the MOJO exists and is applicable, then use the MOJO runtime for datasets that have fewer or equal number of rows than this threshold. MLI/AutoDoc set enable_mojo=True by default, so this setting applies. This setting is only used if mojo_for_predictions is 'auto'." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo for predictions max rows", + "output": "mojo for predictions max rows config.toml: Max number of rows for C++ MOJO predictions: For smaller datasets, the single-threaded but low latency C++ MOJO runtime can lead to significantly faster scoring times than the regular in-Driverless AI Python scoring environment. If enable_mojo=True is passed to the predict API, and the MOJO exists and is applicable, then use the MOJO runtime for datasets that have fewer or equal number of rows than this threshold. MLI/AutoDoc set enable_mojo=True by default, so this setting applies. This setting is only used if mojo_for_predictions is 'auto'." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max number of rows for C++ MOJO predictions: ", + "output": "mojo for predictions max rows config.toml: Max number of rows for C++ MOJO predictions: For smaller datasets, the single-threaded but low latency C++ MOJO runtime can lead to significantly faster scoring times than the regular in-Driverless AI Python scoring environment. If enable_mojo=True is passed to the predict API, and the MOJO exists and is applicable, then use the MOJO runtime for datasets that have fewer or equal number of rows than this threshold. MLI/AutoDoc set enable_mojo=True by default, so this setting applies. This setting is only used if mojo_for_predictions is 'auto'." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mojo_for_predictions_max_rows", + "output": "mojo for predictions max rows config.toml: For smaller datasets, the single-threaded but low latency C++ MOJO runtime can lead to significantly faster scoring times than the regular in-Driverless AI Python scoring environment. 
If enable_mojo=True is passed to the predict API, and the MOJO exists and is applicable, then use the MOJO runtime for datasets that have fewer or equal number of rows than this threshold. MLI/AutoDoc set enable_mojo=True by default, so this setting applies. This setting is only used if mojo_for_predictions is 'auto'." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_for_predictions_max_rows", + "output": "mojo for predictions max rows config.toml: Max number of rows for C++ MOJO predictions: For smaller datasets, the single-threaded but low latency C++ MOJO runtime can lead to significantly faster scoring times than the regular in-Driverless AI Python scoring environment. If enable_mojo=True is passed to the predict API, and the MOJO exists and is applicable, then use the MOJO runtime for datasets that have fewer or equal number of rows than this threshold. MLI/AutoDoc set enable_mojo=True by default, so this setting applies. This setting is only used if mojo_for_predictions is 'auto'." + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_for_predictions_batch_size do? : mojo for predictions batch size config.toml: Batch size (in rows) for C++ MOJO predictions. Only when enable_mojo=True is passed to the predict API, and when the MOJO is applicable (e.g., fewer rows than mojo_for_predictions_max_rows). Larger values can lead to faster scoring, but use more memory." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_for_predictions_batch_size. : mojo for predictions batch size config.toml: Batch size (in rows) for C++ MOJO predictions. Only when enable_mojo=True is passed to the predict API, and when the MOJO is applicable (e.g., fewer rows than mojo_for_predictions_max_rows). Larger values can lead to faster scoring, but use more memory." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Batch size for C++ MOJO predictions.: . 
: Set the mojo for predictions batch size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_for_predictions_batch_size", + "output": "mojo for predictions batch size config.toml: Batch size (in rows) for C++ MOJO predictions. Only when enable_mojo=True is passed to the predict API, and when the MOJO is applicable (e.g., fewer rows than mojo_for_predictions_max_rows). Larger values can lead to faster scoring, but use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_for_predictions_batch_size", + "output": "mojo for predictions batch size config.toml: Batch size for C++ MOJO predictions.: Batch size (in rows) for C++ MOJO predictions. Only when enable_mojo=True is passed to the predict API, and when the MOJO is applicable (e.g., fewer rows than mojo_for_predictions_max_rows). Larger values can lead to faster scoring, but use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo for predictions batch size", + "output": "mojo for predictions batch size config.toml: Batch size for C++ MOJO predictions.: Batch size (in rows) for C++ MOJO predictions. Only when enable_mojo=True is passed to the predict API, and when the MOJO is applicable (e.g., fewer rows than mojo_for_predictions_max_rows). Larger values can lead to faster scoring, but use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Batch size for C++ MOJO predictions.: ", + "output": "mojo for predictions batch size config.toml: Batch size for C++ MOJO predictions.: Batch size (in rows) for C++ MOJO predictions. 
Only when enable_mojo=True is passed to the predict API, and when the MOJO is applicable (e.g., fewer rows than mojo_for_predictions_max_rows). Larger values can lead to faster scoring, but use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mojo_for_predictions_batch_size", + "output": "mojo for predictions batch size config.toml: Batch size (in rows) for C++ MOJO predictions. Only when enable_mojo=True is passed to the predict API, and when the MOJO is applicable (e.g., fewer rows than mojo_for_predictions_max_rows). Larger values can lead to faster scoring, but use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_for_predictions_batch_size", + "output": "mojo for predictions batch size config.toml: Batch size for C++ MOJO predictions.: Batch size (in rows) for C++ MOJO predictions. Only when enable_mojo=True is passed to the predict API, and when the MOJO is applicable (e.g., fewer rows than mojo_for_predictions_max_rows). Larger values can lead to faster scoring, but use more memory." + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_acceptance_test_rtol do? : mojo acceptance test rtol config.toml: Relative tolerance for mini MOJO acceptance test. If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_acceptance_test_rtol. : mojo acceptance test rtol config.toml: Relative tolerance for mini MOJO acceptance test. If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Relative tolerance for mini MOJO acceptance test.: . 
: Set the mojo acceptance test rtol config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_acceptance_test_rtol", + "output": "mojo acceptance test rtol config.toml: Relative tolerance for mini MOJO acceptance test. If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_acceptance_test_rtol", + "output": "mojo acceptance test rtol config.toml: Relative tolerance for mini MOJO acceptance test.: Relative tolerance for mini MOJO acceptance test. If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo acceptance test rtol", + "output": "mojo acceptance test rtol config.toml: Relative tolerance for mini MOJO acceptance test.: Relative tolerance for mini MOJO acceptance test. If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Relative tolerance for mini MOJO acceptance test.: ", + "output": "mojo acceptance test rtol config.toml: Relative tolerance for mini MOJO acceptance test.: Relative tolerance for mini MOJO acceptance test. If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mojo_acceptance_test_rtol", + "output": "mojo acceptance test rtol config.toml: Relative tolerance for mini MOJO acceptance test. If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_acceptance_test_rtol", + "output": "mojo acceptance test rtol config.toml: Relative tolerance for mini MOJO acceptance test.: Relative tolerance for mini MOJO acceptance test. If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_acceptance_test_atol do? : mojo acceptance test atol config.toml: Absolute tolerance for mini MOJO acceptance test (for regression/Shapley, will be scaled by max(abs(preds)). If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_acceptance_test_atol. : mojo acceptance test atol config.toml: Absolute tolerance for mini MOJO acceptance test (for regression/Shapley, will be scaled by max(abs(preds)). If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Absolute tolerance for mini MOJO acceptance test.: . 
: Set the mojo acceptance test atol config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_acceptance_test_atol", + "output": "mojo acceptance test atol config.toml: Absolute tolerance for mini MOJO acceptance test (for regression/Shapley, will be scaled by max(abs(preds)). If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_acceptance_test_atol", + "output": "mojo acceptance test atol config.toml: Absolute tolerance for mini MOJO acceptance test.: Absolute tolerance for mini MOJO acceptance test (for regression/Shapley, will be scaled by max(abs(preds)). If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo acceptance test atol", + "output": "mojo acceptance test atol config.toml: Absolute tolerance for mini MOJO acceptance test.: Absolute tolerance for mini MOJO acceptance test (for regression/Shapley, will be scaled by max(abs(preds)). If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Absolute tolerance for mini MOJO acceptance test.: ", + "output": "mojo acceptance test atol config.toml: Absolute tolerance for mini MOJO acceptance test.: Absolute tolerance for mini MOJO acceptance test (for regression/Shapley, will be scaled by max(abs(preds)). 
If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mojo_acceptance_test_atol", + "output": "mojo acceptance test atol config.toml: Absolute tolerance for mini MOJO acceptance test (for regression/Shapley, will be scaled by max(abs(preds))). If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_acceptance_test_atol", + "output": "mojo acceptance test atol config.toml: Absolute tolerance for mini MOJO acceptance test.: Absolute tolerance for mini MOJO acceptance test (for regression/Shapley, will be scaled by max(abs(preds))). If Python/C++ MOJO differs more than this from Python, won't use MOJO inside Python for later scoring. Only applicable if mojo_for_predictions=True. Disabled if <= 0." + }, + { + "prompt_type": "plain", + "instruction": ": What does reduce_mojo_size do? : reduce mojo size config.toml: Whether to attempt to reduce the size of the MOJO scoring pipeline. A smaller MOJO will also lead to less memory footprint during scoring. It is achieved by reducing some other settings like interaction depth, and hence can affect the predictive accuracy of the model. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain reduce_mojo_size. : reduce mojo size config.toml: Whether to attempt to reduce the size of the MOJO scoring pipeline. A smaller MOJO will also lead to less memory footprint during scoring. It is achieved by reducing some other settings like interaction depth, and hence can affect the predictive accuracy of the model. 
" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Attempt to reduce the size of the MOJO: . : Set the reduce mojo size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "reduce_mojo_size", + "output": "reduce mojo size config.toml: Whether to attempt to reduce the size of the MOJO scoring pipeline. A smaller MOJO will also lead toless memory footprint during scoring. It is achieved by reducing some other settings like interaction depth, andhence can affect the predictive accuracy of the model. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "reduce_mojo_size", + "output": "reduce mojo size config.toml: Attempt to reduce the size of the MOJO: Whether to attempt to reduce the size of the MOJO scoring pipeline. A smaller MOJO will also lead toless memory footprint during scoring. It is achieved by reducing some other settings like interaction depth, andhence can affect the predictive accuracy of the model. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "reduce mojo size", + "output": "reduce mojo size config.toml: Attempt to reduce the size of the MOJO: Whether to attempt to reduce the size of the MOJO scoring pipeline. A smaller MOJO will also lead toless memory footprint during scoring. It is achieved by reducing some other settings like interaction depth, andhence can affect the predictive accuracy of the model. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Attempt to reduce the size of the MOJO: ", + "output": "reduce mojo size config.toml: Attempt to reduce the size of the MOJO: Whether to attempt to reduce the size of the MOJO scoring pipeline. A smaller MOJO will also lead toless memory footprint during scoring. 
It is achieved by reducing some other settings like interaction depth, and hence can affect the predictive accuracy of the model. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting reduce_mojo_size", + "output": "reduce mojo size config.toml: Whether to attempt to reduce the size of the MOJO scoring pipeline. A smaller MOJO will also lead to less memory footprint during scoring. It is achieved by reducing some other settings like interaction depth, and hence can affect the predictive accuracy of the model. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting reduce_mojo_size", + "output": "reduce mojo size config.toml: Attempt to reduce the size of the MOJO: Whether to attempt to reduce the size of the MOJO scoring pipeline. A smaller MOJO will also lead to less memory footprint during scoring. It is achieved by reducing some other settings like interaction depth, and hence can affect the predictive accuracy of the model. " + }, + { + "prompt_type": "plain", + "instruction": ": What does make_pipeline_visualization do? : make pipeline visualization config.toml: Whether to create the pipeline visualization at the end of each experiment. Uses MOJO to show pipeline, input features, transformers, model, and outputs of model. MOJO-capable tree models show first tree." + }, + { + "prompt_type": "plain", + "instruction": ": Explain make_pipeline_visualization. : make pipeline visualization config.toml: Whether to create the pipeline visualization at the end of each experiment. Uses MOJO to show pipeline, input features, transformers, model, and outputs of model. MOJO-capable tree models show first tree." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Make pipeline visualization: . 
: Set the make pipeline visualization config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_pipeline_visualization", + "output": "make pipeline visualization config.toml: Whether to create the pipeline visualization at the end of each experiment. Uses MOJO to show pipeline, input features, transformers, model, and outputs of model. MOJO-capable tree models show first tree." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_pipeline_visualization", + "output": "make pipeline visualization config.toml: Make pipeline visualization: Whether to create the pipeline visualization at the end of each experiment. Uses MOJO to show pipeline, input features, transformers, model, and outputs of model. MOJO-capable tree models show first tree." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make pipeline visualization", + "output": "make pipeline visualization config.toml: Make pipeline visualization: Whether to create the pipeline visualization at the end of each experiment. Uses MOJO to show pipeline, input features, transformers, model, and outputs of model. MOJO-capable tree models show first tree." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Make pipeline visualization: ", + "output": "make pipeline visualization config.toml: Make pipeline visualization: Whether to create the pipeline visualization at the end of each experiment. Uses MOJO to show pipeline, input features, transformers, model, and outputs of model. MOJO-capable tree models show first tree." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting make_pipeline_visualization", + "output": "make pipeline visualization config.toml: Whether to create the pipeline visualization at the end of each experiment. Uses MOJO to show pipeline, input features, transformers, model, and outputs of model. MOJO-capable tree models show first tree." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting make_pipeline_visualization", + "output": "make pipeline visualization config.toml: Make pipeline visualization: Whether to create the pipeline visualization at the end of each experiment. Uses MOJO to show pipeline, input features, transformers, model, and outputs of model. MOJO-capable tree models show first tree." + }, + { + "prompt_type": "plain", + "instruction": ": What does make_python_pipeline_visualization do? : make python pipeline visualization config.toml: Whether to create the python pipeline visualization at the end of each experiment. Each feature and transformer includes a variable importance at end in brackets. Only done when forced on, and artifacts as png files will appear in summary zip. 
Each experiment has files per individual in final population: 1) preprune_False_0.0 : Before final pruning, without any additional variable importance threshold pruning 2) preprune_True_0.0 : Before final pruning, with additional variable importance <=0.0 pruning 3) postprune_False_0.0 : After final pruning, without any additional variable importance threshold pruning 4) postprune_True_0.0 : After final pruning, with additional variable importance <=0.0 pruning 5) posttournament_False_0.0 : After final pruning and tournament, without any additional variable importance threshold pruning 6) posttournament_True_0.0 : After final pruning and tournament, with additional variable importance <=0.0 pruning 1-5 are done with 'on' while 'auto' only does 6 corresponding to the final post-pruned individuals. Even post pruning, some features have zero importance, because only those genes that have value+variance in variable importance of value=0.0 get pruned. GA can have many folds with positive variance for a gene, and those are not removed in case they are useful features for final model. If small mojo option is chosen (reduce_mojo_size True), then the variance of feature gain is ignored for which genes and features are pruned as well as for what appears in the graph. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain make_python_pipeline_visualization. : make python pipeline visualization config.toml: Whether to create the python pipeline visualization at the end of each experiment. Each feature and transformer includes a variable importance at end in brackets. Only done when forced on, and artifacts as png files will appear in summary zip. 
Each experiment has files per individual in final population: 1) preprune_False_0.0 : Before final pruning, without any additional variable importance threshold pruning 2) preprune_True_0.0 : Before final pruning, with additional variable importance <=0.0 pruning 3) postprune_False_0.0 : After final pruning, without any additional variable importance threshold pruning 4) postprune_True_0.0 : After final pruning, with additional variable importance <=0.0 pruning 5) posttournament_False_0.0 : After final pruning and tournament, without any additional variable importance threshold pruning 6) posttournament_True_0.0 : After final pruning and tournament, with additional variable importance <=0.0 pruning 1-5 are done with 'on' while 'auto' only does 6 corresponding to the final post-pruned individuals. Even post pruning, some features have zero importance, because only those genes that have value+variance in variable importance of value=0.0 get pruned. GA can have many folds with positive variance for a gene, and those are not removed in case they are useful features for final model. If small mojo option is chosen (reduce_mojo_size True), then the variance of feature gain is ignored for which genes and features are pruned as well as for what appears in the graph. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Make python pipeline visualization: . : Set the make python pipeline visualization config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_python_pipeline_visualization", + "output": "make python pipeline visualization config.toml: Whether to create the python pipeline visualization at the end of each experiment. Each feature and transformer includes a variable importance at end in brackets. Only done when forced on, and artifacts as png files will appear in summary zip. 
Each experiment has files per individual in final population: 1) preprune_False_0.0 : Before final pruning, without any additional variable importance threshold pruning 2) preprune_True_0.0 : Before final pruning, with additional variable importance <=0.0 pruning 3) postprune_False_0.0 : After final pruning, without any additional variable importance threshold pruning 4) postprune_True_0.0 : After final pruning, with additional variable importance <=0.0 pruning 5) posttournament_False_0.0 : After final pruning and tournament, without any additional variable importance threshold pruning 6) posttournament_True_0.0 : After final pruning and tournament, with additional variable importance <=0.0 pruning 1-5 are done with 'on' while 'auto' only does 6 corresponding to the final post-pruned individuals. Even post pruning, some features have zero importance, because only those genes that have value+variance in variable importance of value=0.0 get pruned. GA can have many folds with positive variance for a gene, and those are not removed in case they are useful features for final model. If small mojo option is chosen (reduce_mojo_size True), then the variance of feature gain is ignored for which genes and features are pruned as well as for what appears in the graph. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_python_pipeline_visualization", + "output": "make python pipeline visualization config.toml: Make python pipeline visualization: Whether to create the python pipeline visualization at the end of each experiment. Each feature and transformer includes a variable importance at end in brackets. Only done when forced on, and artifacts as png files will appear in summary zip. 
Each experiment has files per individual in final population: 1) preprune_False_0.0 : Before final pruning, without any additional variable importance threshold pruning 2) preprune_True_0.0 : Before final pruning, with additional variable importance <=0.0 pruning 3) postprune_False_0.0 : After final pruning, without any additional variable importance threshold pruning 4) postprune_True_0.0 : After final pruning, with additional variable importance <=0.0 pruning 5) posttournament_False_0.0 : After final pruning and tournament, without any additional variable importance threshold pruning 6) posttournament_True_0.0 : After final pruning and tournament, with additional variable importance <=0.0 pruning 1-5 are done with 'on' while 'auto' only does 6 corresponding to the final post-pruned individuals. Even post pruning, some features have zero importance, because only those genes that have value+variance in variable importance of value=0.0 get pruned. GA can have many folds with positive variance for a gene, and those are not removed in case they are useful features for final model. If small mojo option is chosen (reduce_mojo_size True), then the variance of feature gain is ignored for which genes and features are pruned as well as for what appears in the graph. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make python pipeline visualization", + "output": "make python pipeline visualization config.toml: Make python pipeline visualization: Whether to create the python pipeline visualization at the end of each experiment. Each feature and transformer includes a variable importance at end in brackets. Only done when forced on, and artifacts as png files will appear in summary zip. 
Each experiment has files per individual in final population: 1) preprune_False_0.0 : Before final pruning, without any additional variable importance threshold pruning 2) preprune_True_0.0 : Before final pruning, with additional variable importance <=0.0 pruning 3) postprune_False_0.0 : After final pruning, without any additional variable importance threshold pruning 4) postprune_True_0.0 : After final pruning, with additional variable importance <=0.0 pruning 5) posttournament_False_0.0 : After final pruning and tournament, without any additional variable importance threshold pruning 6) posttournament_True_0.0 : After final pruning and tournament, with additional variable importance <=0.0 pruning 1-5 are done with 'on' while 'auto' only does 6 corresponding to the final post-pruned individuals. Even post pruning, some features have zero importance, because only those genes that have value+variance in variable importance of value=0.0 get pruned. GA can have many folds with positive variance for a gene, and those are not removed in case they are useful features for final model. If small mojo option is chosen (reduce_mojo_size True), then the variance of feature gain is ignored for which genes and features are pruned as well as for what appears in the graph. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Make python pipeline visualization: ", + "output": "make python pipeline visualization config.toml: Make python pipeline visualization: Whether to create the python pipeline visualization at the end of each experiment. Each feature and transformer includes a variable importance at end in brackets. Only done when forced on, and artifacts as png files will appear in summary zip. 
Each experiment has files per individual in final population: 1) preprune_False_0.0 : Before final pruning, without any additional variable importance threshold pruning 2) preprune_True_0.0 : Before final pruning, with additional variable importance <=0.0 pruning 3) postprune_False_0.0 : After final pruning, without any additional variable importance threshold pruning 4) postprune_True_0.0 : After final pruning, with additional variable importance <=0.0 pruning 5) posttournament_False_0.0 : After final pruning and tournament, without any additional variable importance threshold pruning 6) posttournament_True_0.0 : After final pruning and tournament, with additional variable importance <=0.0 pruning 1-5 are done with 'on' while 'auto' only does 6 corresponding to the final post-pruned individuals. Even post pruning, some features have zero importance, because only those genes that have value+variance in variable importance of value=0.0 get pruned. GA can have many folds with positive variance for a gene, and those are not removed in case they are useful features for final model. If small mojo option is chosen (reduce_mojo_size True), then the variance of feature gain is ignored for which genes and features are pruned as well as for what appears in the graph. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting make_python_pipeline_visualization", + "output": "make python pipeline visualization config.toml: Whether to create the python pipeline visualization at the end of each experiment. Each feature and transformer includes a variable importance at end in brackets. Only done when forced on, and artifacts as png files will appear in summary zip. 
Each experiment has files per individual in final population: 1) preprune_False_0.0 : Before final pruning, without any additional variable importance threshold pruning 2) preprune_True_0.0 : Before final pruning, with additional variable importance <=0.0 pruning 3) postprune_False_0.0 : After final pruning, without any additional variable importance threshold pruning 4) postprune_True_0.0 : After final pruning, with additional variable importance <=0.0 pruning 5) posttournament_False_0.0 : After final pruning and tournament, without any additional variable importance threshold pruning 6) posttournament_True_0.0 : After final pruning and tournament, with additional variable importance <=0.0 pruning 1-5 are done with 'on' while 'auto' only does 6 corresponding to the final post-pruned individuals. Even post pruning, some features have zero importance, because only those genes that have value+variance in variable importance of value=0.0 get pruned. GA can have many folds with positive variance for a gene, and those are not removed in case they are useful features for final model. If small mojo option is chosen (reduce_mojo_size True), then the variance of feature gain is ignored for which genes and features are pruned as well as for what appears in the graph. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting make_python_pipeline_visualization", + "output": "make python pipeline visualization config.toml: Make python pipeline visualization: Whether to create the python pipeline visualization at the end of each experiment. Each feature and transformer includes a variable importance at end in brackets. Only done when forced on, and artifacts as png files will appear in summary zip. 
Each experiment has files per individual in final population: 1) preprune_False_0.0 : Before final pruning, without any additional variable importance threshold pruning 2) preprune_True_0.0 : Before final pruning, with additional variable importance <=0.0 pruning 3) postprune_False_0.0 : After final pruning, without any additional variable importance threshold pruning 4) postprune_True_0.0 : After final pruning, with additional variable importance <=0.0 pruning 5) posttournament_False_0.0 : After final pruning and tournament, without any additional variable importance threshold pruning 6) posttournament_True_0.0 : After final pruning and tournament, with additional variable importance <=0.0 pruning 1-5 are done with 'on' while 'auto' only does 6 corresponding to the final post-pruned individuals. Even post pruning, some features have zero importance, because only those genes that have value+variance in variable importance of value=0.0 get pruned. GA can have many folds with positive variance for a gene, and those are not removed in case they are useful features for final model. If small mojo option is chosen (reduce_mojo_size True), then the variance of feature gain is ignored for which genes and features are pruned as well as for what appears in the graph. " + }, + { + "prompt_type": "plain", + "instruction": ": What does make_autoreport do? : make autoreport config.toml: Whether to create the experiment AutoDoc after end of experiment. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain make_autoreport. : make autoreport config.toml: Whether to create the experiment AutoDoc after end of experiment. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Make AutoDoc: . 
: Set the make autoreport config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_autoreport", + "output": "make autoreport config.toml: Whether to create the experiment AutoDoc after end of experiment. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_autoreport", + "output": "make autoreport config.toml: Make AutoDoc: Whether to create the experiment AutoDoc after end of experiment. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make autoreport", + "output": "make autoreport config.toml: Make AutoDoc: Whether to create the experiment AutoDoc after end of experiment. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Make AutoDoc: ", + "output": "make autoreport config.toml: Make AutoDoc: Whether to create the experiment AutoDoc after end of experiment. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting make_autoreport", + "output": "make autoreport config.toml: Whether to create the experiment AutoDoc after end of experiment. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting make_autoreport", + "output": "make autoreport config.toml: Make AutoDoc: Whether to create the experiment AutoDoc after end of experiment. " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_cols_make_autoreport_automatically do? : max cols make autoreport automatically config.toml: Number of columns beyond which will not automatically build autoreport at end of experiment.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_cols_make_autoreport_automatically. 
: max cols make autoreport automatically config.toml: Number of columns beyond which will not automatically build autoreport at end of experiment.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cols_make_autoreport_automatically", + "output": "max cols make autoreport automatically config.toml: Number of columns beyond which will not automatically build autoreport at end of experiment.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cols_make_autoreport_automatically", + "output": "max cols make autoreport automatically config.toml: Number of columns beyond which will not automatically build autoreport at end of experiment.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max cols make autoreport automatically", + "output": "max cols make autoreport automatically config.toml: Number of columns beyond which will not automatically build autoreport at end of experiment.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of columns beyond which will not automatically build autoreport at end of experiment.: ", + "output": "max cols make autoreport automatically config.toml: Number of columns beyond which will not automatically build autoreport at end of experiment.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_cols_make_autoreport_automatically", + "output": "max cols make autoreport automatically config.toml: Number of columns beyond which will not automatically build autoreport at end of experiment.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_cols_make_autoreport_automatically", + "output": "max cols 
make autoreport automatically config.toml: Number of columns beyond which will not automatically build autoreport at end of experiment.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_cols_make_pipeline_visualization_automatically do? : max cols make pipeline visualization automatically config.toml: Number of columns beyond which will not automatically build pipeline visualization at end of experiment.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_cols_make_pipeline_visualization_automatically. : max cols make pipeline visualization automatically config.toml: Number of columns beyond which will not automatically build pipeline visualization at end of experiment.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cols_make_pipeline_visualization_automatically", + "output": "max cols make pipeline visualization automatically config.toml: Number of columns beyond which will not automatically build pipeline visualization at end of experiment.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cols_make_pipeline_visualization_automatically", + "output": "max cols make pipeline visualization automatically config.toml: Number of columns beyond which will not automatically build pipeline visualization at end of experiment.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max cols make pipeline visualization automatically", + "output": "max cols make pipeline visualization automatically config.toml: Number of columns beyond which will not automatically build pipeline visualization at end of experiment.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of columns beyond which will not automatically build 
pipeline visualization at end of experiment.: ", + "output": "max cols make pipeline visualization automatically config.toml: Number of columns beyond which will not automatically build pipeline visualization at end of experiment.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_cols_make_pipeline_visualization_automatically", + "output": "max cols make pipeline visualization automatically config.toml: Number of columns beyond which will not automatically build pipeline visualization at end of experiment.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_cols_make_pipeline_visualization_automatically", + "output": "max cols make pipeline visualization automatically config.toml: Number of columns beyond which will not automatically build pipeline visualization at end of experiment.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does pass_env_to_deprecated_python_scoring do? : pass env to deprecated python scoring config.toml: Pass environment variables from running Driverless AI instance to Python scoring pipeline for deprecated models, when they are used to make predictions. Use with caution. If config.toml overrides are set by env vars, and they differ from what the experiment's env looked like when it was trained, then unexpected consequences can occur. Enable this only to override certain well-controlled settings like the port for H2O-3 custom recipe server. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain pass_env_to_deprecated_python_scoring. : pass env to deprecated python scoring config.toml: Pass environment variables from running Driverless AI instance to Python scoring pipeline for deprecated models, when they are used to make predictions. Use with caution. 
If config.toml overrides are set by env vars, and they differ from what the experiment's env looked like when it was trained, then unexpected consequences can occur. Enable this only to override certain well-controlled settings like the port for H2O-3 custom recipe server. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Pass environment variables to deprecated python scoring package: . : Set the pass env to deprecated python scoring config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pass_env_to_deprecated_python_scoring", + "output": "pass env to deprecated python scoring config.toml: Pass environment variables from running Driverless AI instance to Python scoring pipeline for deprecated models, when they are used to make predictions. Use with caution. If config.toml overrides are set by env vars, and they differ from what the experiment's env looked like when it was trained, then unexpected consequences can occur. Enable this only to override certain well-controlled settings like the port for H2O-3 custom recipe server. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pass_env_to_deprecated_python_scoring", + "output": "pass env to deprecated python scoring config.toml: Pass environment variables to deprecated python scoring package: Pass environment variables from running Driverless AI instance to Python scoring pipeline for deprecated models, when they are used to make predictions. Use with caution. If config.toml overrides are set by env vars, and they differ from what the experiment's env looked like when it was trained, then unexpected consequences can occur. Enable this only to override certain well-controlled settings like the port for H2O-3 custom recipe server. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pass env to deprecated python scoring", + "output": "pass env to deprecated python scoring config.toml: Pass environment variables to deprecated python scoring package: Pass environment variables from running Driverless AI instance to Python scoring pipeline for deprecated models, when they are used to make predictions. Use with caution. If config.toml overrides are set by env vars, and they differ from what the experiment's env looked like when it was trained, then unexpected consequences can occur. Enable this only to override certain well-controlled settings like the port for H2O-3 custom recipe server. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Pass environment variables to deprecated python scoring package: ", + "output": "pass env to deprecated python scoring config.toml: Pass environment variables to deprecated python scoring package: Pass environment variables from running Driverless AI instance to Python scoring pipeline for deprecated models, when they are used to make predictions. Use with caution. If config.toml overrides are set by env vars, and they differ from what the experiment's env looked like when it was trained, then unexpected consequences can occur. Enable this only to override certain well-controlled settings like the port for H2O-3 custom recipe server. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pass_env_to_deprecated_python_scoring", + "output": "pass env to deprecated python scoring config.toml: Pass environment variables from running Driverless AI instance to Python scoring pipeline for deprecated models, when they are used to make predictions. Use with caution. 
If config.toml overrides are set by env vars, and they differ from what the experiment's env looked like when it was trained, then unexpected consequences can occur. Enable this only to override certain well-controlled settings like the port for H2O-3 custom recipe server. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pass_env_to_deprecated_python_scoring", + "output": "pass env to deprecated python scoring config.toml: Pass environment variables to deprecated python scoring package: Pass environment variables from running Driverless AI instance to Python scoring pipeline for deprecated models, when they are used to make predictions. Use with caution. If config.toml overrides are set by env vars, and they differ from what the experiment's env looked like when it was trained, then unexpected consequences can occur. Enable this only to override certain well-controlled settings like the port for H2O-3 custom recipe server. " + }, + { + "prompt_type": "plain", + "instruction": ": What does transformer_description_line_length do? : transformer description line length config.toml: Line length for autoreport descriptions of transformers. -1 means use autodoc_keras_summary_line_length: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain transformer_description_line_length. : transformer description line length config.toml: Line length for autoreport descriptions of transformers. -1 means use autodoc_keras_summary_line_length: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "transformer_description_line_length", + "output": "transformer description line length config.toml: Line length for autoreport descriptions of transformers. 
-1 means use autodoc_keras_summary_line_length: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "transformer_description_line_length", + "output": "transformer description line length config.toml: Line length for autoreport descriptions of transformers. -1 means use autodoc_keras_summary_line_length: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "transformer description line length", + "output": "transformer description line length config.toml: Line length for autoreport descriptions of transformers. -1 means use autodoc_keras_summary_line_length: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Line length for autoreport descriptions of transformers. -1 means use autodoc_keras_summary_line_length: ", + "output": "transformer description line length config.toml: Line length for autoreport descriptions of transformers. -1 means use autodoc_keras_summary_line_length: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting transformer_description_line_length", + "output": "transformer description line length config.toml: Line length for autoreport descriptions of transformers. -1 means use autodoc_keras_summary_line_length: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting transformer_description_line_length", + "output": "transformer description line length config.toml: Line length for autoreport descriptions of transformers. -1 means use autodoc_keras_summary_line_length: " + }, + { + "prompt_type": "plain", + "instruction": ": What does benchmark_mojo_latency do? : benchmark mojo latency config.toml: Whether to measure the MOJO scoring latency at the time of MOJO creation." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain benchmark_mojo_latency. : benchmark mojo latency config.toml: Whether to measure the MOJO scoring latency at the time of MOJO creation." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Measure MOJO scoring latency: . : Set the benchmark mojo latency config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark_mojo_latency", + "output": "benchmark mojo latency config.toml: Whether to measure the MOJO scoring latency at the time of MOJO creation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark_mojo_latency", + "output": "benchmark mojo latency config.toml: Measure MOJO scoring latency: Whether to measure the MOJO scoring latency at the time of MOJO creation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark mojo latency", + "output": "benchmark mojo latency config.toml: Measure MOJO scoring latency: Whether to measure the MOJO scoring latency at the time of MOJO creation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Measure MOJO scoring latency: ", + "output": "benchmark mojo latency config.toml: Measure MOJO scoring latency: Whether to measure the MOJO scoring latency at the time of MOJO creation." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting benchmark_mojo_latency", + "output": "benchmark mojo latency config.toml: Whether to measure the MOJO scoring latency at the time of MOJO creation." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting benchmark_mojo_latency", + "output": "benchmark mojo latency config.toml: Measure MOJO scoring latency: Whether to measure the MOJO scoring latency at the time of MOJO creation." + }, + { + "prompt_type": "plain", + "instruction": ": What does benchmark_mojo_latency_auto_size_limit do? : benchmark mojo latency auto size limit config.toml: Max size of pipeline.mojo file (in MB) for automatic mode of MOJO scoring latency measurement" + }, + { + "prompt_type": "plain", + "instruction": ": Explain benchmark_mojo_latency_auto_size_limit. : benchmark mojo latency auto size limit config.toml: Max size of pipeline.mojo file (in MB) for automatic mode of MOJO scoring latency measurement" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max size of pipeline.mojo file (in MB) for when benchmark_mojo_latency is set to 'auto': . : Set the benchmark mojo latency auto size limit config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark_mojo_latency_auto_size_limit", + "output": "benchmark mojo latency auto size limit config.toml: Max size of pipeline.mojo file (in MB) for automatic mode of MOJO scoring latency measurement" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark_mojo_latency_auto_size_limit", + "output": "benchmark mojo latency auto size limit config.toml: Max size of pipeline.mojo file (in MB) for when benchmark_mojo_latency is set to 'auto': Max size of pipeline.mojo file (in MB) for automatic mode of MOJO scoring latency measurement" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark mojo latency auto size limit", + "output": "benchmark mojo latency auto size limit config.toml: 
Max size of pipeline.mojo file (in MB) for when benchmark_mojo_latency is set to 'auto': Max size of pipeline.mojo file (in MB) for automatic mode of MOJO scoring latency measurement" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max size of pipeline.mojo file (in MB) for when benchmark_mojo_latency is set to 'auto': ", + "output": "benchmark mojo latency auto size limit config.toml: Max size of pipeline.mojo file (in MB) for when benchmark_mojo_latency is set to 'auto': Max size of pipeline.mojo file (in MB) for automatic mode of MOJO scoring latency measurement" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting benchmark_mojo_latency_auto_size_limit", + "output": "benchmark mojo latency auto size limit config.toml: Max size of pipeline.mojo file (in MB) for automatic mode of MOJO scoring latency measurement" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting benchmark_mojo_latency_auto_size_limit", + "output": "benchmark mojo latency auto size limit config.toml: Max size of pipeline.mojo file (in MB) for when benchmark_mojo_latency is set to 'auto': Max size of pipeline.mojo file (in MB) for automatic mode of MOJO scoring latency measurement" + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_building_timeout do? : mojo building timeout config.toml: If MOJO creation times out at end of experiment, can still make MOJO from the GUI or from the R/Py clients (timeout doesn't apply there)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_building_timeout. : mojo building timeout config.toml: If MOJO creation times out at end of experiment, can still make MOJO from the GUI or from the R/Py clients (timeout doesn't apply there)." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Timeout in seconds to wait for MOJO creation at end of experiment.: . : Set the mojo building timeout config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_building_timeout", + "output": "mojo building timeout config.toml: If MOJO creation times out at end of experiment, can still make MOJO from the GUI or from the R/Py clients (timeout doesn't apply there)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_building_timeout", + "output": "mojo building timeout config.toml: Timeout in seconds to wait for MOJO creation at end of experiment.: If MOJO creation times out at end of experiment, can still make MOJO from the GUI or from the R/Py clients (timeout doesn't apply there)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo building timeout", + "output": "mojo building timeout config.toml: Timeout in seconds to wait for MOJO creation at end of experiment.: If MOJO creation times out at end of experiment, can still make MOJO from the GUI or from the R/Py clients (timeout doesn't apply there)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Timeout in seconds to wait for MOJO creation at end of experiment.: ", + "output": "mojo building timeout config.toml: Timeout in seconds to wait for MOJO creation at end of experiment.: If MOJO creation times out at end of experiment, can still make MOJO from the GUI or from the R/Py clients (timeout doesn't apply there)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mojo_building_timeout", + "output": "mojo building timeout config.toml: If MOJO creation times out at end of experiment, can still make MOJO from the GUI or from the R/Py clients (timeout doesn't apply there)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_building_timeout", + "output": "mojo building timeout config.toml: Timeout in seconds to wait for MOJO creation at end of experiment.: If MOJO creation times out at end of experiment, can still make MOJO from the GUI or from the R/Py clients (timeout doesn't apply there)." + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_vis_building_timeout do? : mojo vis building timeout config.toml: If MOJO visualization creation times out at end of experiment, MOJO is still created if possible within the time limit specified by mojo_building_timeout." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_vis_building_timeout. : mojo vis building timeout config.toml: If MOJO visualization creation times out at end of experiment, MOJO is still created if possible within the time limit specified by mojo_building_timeout." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Timeout in seconds to wait for MOJO visualization creation at end of experiment.: . : Set the mojo vis building timeout config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_vis_building_timeout", + "output": "mojo vis building timeout config.toml: If MOJO visualization creation times out at end of experiment, MOJO is still created if possible within the time limit specified by mojo_building_timeout." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_vis_building_timeout", + "output": "mojo vis building timeout config.toml: Timeout in seconds to wait for MOJO visualization creation at end of experiment.: If MOJO visualization creation times out at end of experiment, MOJO is still created if possible within the time limit specified by mojo_building_timeout." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo vis building timeout", + "output": "mojo vis building timeout config.toml: Timeout in seconds to wait for MOJO visualization creation at end of experiment.: If MOJO visualization creation times out at end of experiment, MOJO is still created if possible within the time limit specified by mojo_building_timeout." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Timeout in seconds to wait for MOJO visualization creation at end of experiment.: ", + "output": "mojo vis building timeout config.toml: Timeout in seconds to wait for MOJO visualization creation at end of experiment.: If MOJO visualization creation times out at end of experiment, MOJO is still created if possible within the time limit specified by mojo_building_timeout." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mojo_vis_building_timeout", + "output": "mojo vis building timeout config.toml: If MOJO visualization creation times out at end of experiment, MOJO is still created if possible within the time limit specified by mojo_building_timeout." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_vis_building_timeout", + "output": "mojo vis building timeout config.toml: Timeout in seconds to wait for MOJO visualization creation at end of experiment.: If MOJO visualization creation times out at end of experiment, MOJO is still created if possible within the time limit specified by mojo_building_timeout." + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_building_parallelism do? : mojo building parallelism config.toml: If MOJO creation is too slow, increase this value. Higher values can finish faster, but use more memory. If MOJO creation fails due to an out-of-memory error, reduce this value to 1. Set to -1 for all physical cores. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_building_parallelism. : mojo building parallelism config.toml: If MOJO creation is too slow, increase this value. Higher values can finish faster, but use more memory. If MOJO creation fails due to an out-of-memory error, reduce this value to 1. Set to -1 for all physical cores. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of parallel workers to use during MOJO creation (-1 = all cores): . : Set the mojo building parallelism config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_building_parallelism", + "output": "mojo building parallelism config.toml: If MOJO creation is too slow, increase this value. Higher values can finish faster, but use more memory. If MOJO creation fails due to an out-of-memory error, reduce this value to 1. Set to -1 for all physical cores. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_building_parallelism", + "output": "mojo building parallelism config.toml: Number of parallel workers to use during MOJO creation (-1 = all cores): If MOJO creation is too slow, increase this value. Higher values can finish faster, but use more memory. If MOJO creation fails due to an out-of-memory error, reduce this value to 1. Set to -1 for all physical cores. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo building parallelism", + "output": "mojo building parallelism config.toml: Number of parallel workers to use during MOJO creation (-1 = all cores): If MOJO creation is too slow, increase this value. Higher values can finish faster, but use more memory. If MOJO creation fails due to an out-of-memory error, reduce this value to 1. Set to -1 for all physical cores. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of parallel workers to use during MOJO creation (-1 = all cores): ", + "output": "mojo building parallelism config.toml: Number of parallel workers to use during MOJO creation (-1 = all cores): If MOJO creation is too slow, increase this value. Higher values can finish faster, but use more memory. If MOJO creation fails due to an out-of-memory error, reduce this value to 1. Set to -1 for all physical cores. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mojo_building_parallelism", + "output": "mojo building parallelism config.toml: If MOJO creation is too slow, increase this value. Higher values can finish faster, but use more memory. If MOJO creation fails due to an out-of-memory error, reduce this value to 1. Set to -1 for all physical cores. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_building_parallelism", + "output": "mojo building parallelism config.toml: Number of parallel workers to use during MOJO creation (-1 = all cores): If MOJO creation is too slow, increase this value. Higher values can finish faster, but use more memory.If MOJO creation fails due to an out-of-memory error, reduce this value to 1.Set to -1 for all physical cores. " + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_building_parallelism_base_model_size_limit do? : mojo building parallelism base model size limit config.toml: Size in bytes that all pickled and compressed base models have to satisfy to use parallel MOJO building. For large base models, parallel MOJO building can use too much memory. Only used if final_fitted_model_per_model_fold_files is true. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_building_parallelism_base_model_size_limit. : mojo building parallelism base model size limit config.toml: Size in bytes that all pickled and compressed base models have to satisfy to use parallel MOJO building. For large base models, parallel MOJO building can use too much memory. Only used if final_fitted_model_per_model_fold_files is true. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Size of base models to allow mojo_building_parallelism: . : Set the mojo building parallelism base model size limit config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_building_parallelism_base_model_size_limit", + "output": "mojo building parallelism base model size limit config.toml: Size in bytes that all pickled and compressed base models have to satisfy to use parallel MOJO building. For large base models, parallel MOJO building can use too much memory. 
Only used if final_fitted_model_per_model_fold_files is true. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_building_parallelism_base_model_size_limit", + "output": "mojo building parallelism base model size limit config.toml: Size of base models to allow mojo_building_parallelism: Size in bytes that all pickled and compressed base models have to satisfy to use parallel MOJO building. For large base models, parallel MOJO building can use too much memory. Only used if final_fitted_model_per_model_fold_files is true. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo building parallelism base model size limit", + "output": "mojo building parallelism base model size limit config.toml: Size of base models to allow mojo_building_parallelism: Size in bytes that all pickled and compressed base models have to satisfy to use parallel MOJO building. For large base models, parallel MOJO building can use too much memory. Only used if final_fitted_model_per_model_fold_files is true. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Size of base models to allow mojo_building_parallelism: ", + "output": "mojo building parallelism base model size limit config.toml: Size of base models to allow mojo_building_parallelism: Size in bytes that all pickled and compressed base models have to satisfy to use parallel MOJO building. For large base models, parallel MOJO building can use too much memory. Only used if final_fitted_model_per_model_fold_files is true. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mojo_building_parallelism_base_model_size_limit", + "output": "mojo building parallelism base model size limit config.toml: Size in bytes that all pickled and compressed base models have to satisfy to use parallel MOJO building. For large base models, parallel MOJO building can use too much memory. Only used if final_fitted_model_per_model_fold_files is true. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_building_parallelism_base_model_size_limit", + "output": "mojo building parallelism base model size limit config.toml: Size of base models to allow mojo_building_parallelism: Size in bytes that all pickled and compressed base models have to satisfy to use parallel MOJO building. For large base models, parallel MOJO building can use too much memory. Only used if final_fitted_model_per_model_fold_files is true. " + }, + { + "prompt_type": "plain", + "instruction": ": What does show_pipeline_sizes do? : show pipeline sizes config.toml: Whether to show model and pipeline sizes in logs. If 'auto', then not done if more than 10 base models+folds, because expect not concerned with size." + }, + { + "prompt_type": "plain", + "instruction": ": Explain show_pipeline_sizes. : show pipeline sizes config.toml: Whether to show model and pipeline sizes in logs. If 'auto', then not done if more than 10 base models+folds, because expect not concerned with size." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to show model and pipeline sizes in logs: . : Set the show pipeline sizes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_pipeline_sizes", + "output": "show pipeline sizes config.toml: Whether to show model and pipeline sizes in logs. 
If 'auto', then not done if more than 10 base models+folds, because expect not concerned with size." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_pipeline_sizes", + "output": "show pipeline sizes config.toml: Whether to show model and pipeline sizes in logs: Whether to show model and pipeline sizes in logs. If 'auto', then not done if more than 10 base models+folds, because expect not concerned with size." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show pipeline sizes", + "output": "show pipeline sizes config.toml: Whether to show model and pipeline sizes in logs: Whether to show model and pipeline sizes in logs. If 'auto', then not done if more than 10 base models+folds, because expect not concerned with size." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to show model and pipeline sizes in logs: ", + "output": "show pipeline sizes config.toml: Whether to show model and pipeline sizes in logs: Whether to show model and pipeline sizes in logs. If 'auto', then not done if more than 10 base models+folds, because expect not concerned with size." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting show_pipeline_sizes", + "output": "show pipeline sizes config.toml: Whether to show model and pipeline sizes in logs. If 'auto', then not done if more than 10 base models+folds, because expect not concerned with size." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting show_pipeline_sizes", + "output": "show pipeline sizes config.toml: Whether to show model and pipeline sizes in logs: Whether to show model and pipeline sizes in logs. 
If 'auto', then not done if more than 10 base models+folds, because expect not concerned with size." + }, + { + "prompt_type": "plain", + "instruction": ": What does exclusive_mode do? : exclusive mode config.toml: safe: assume might be running another experiment on same nodemoderate: assume not running any other experiments or tasks on same node, but still only use physical core countmax: assume not running anything else on node at all except the experimentIf multinode is enabled, this option has no effect, unless worker_remote_processors=1 when it will still be applied.Each exclusive mode can be chosen, and then fine-tuned using each expert settings. Changing theexclusive mode will reset all exclusive mode related options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of exclusive mode rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, all the mode rules are not re-appliedand any fine-tuning is preserved. To reset mode behavior, one can switch between 'safe' and the desired mode. Thisway the new child experiment will use the default system resources for the chosen mode. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain exclusive_mode. : exclusive mode config.toml: safe: assume might be running another experiment on same nodemoderate: assume not running any other experiments or tasks on same node, but still only use physical core countmax: assume not running anything else on node at all except the experimentIf multinode is enabled, this option has no effect, unless worker_remote_processors=1 when it will still be applied.Each exclusive mode can be chosen, and then fine-tuned using each expert settings. 
Changing theexclusive mode will reset all exclusive mode related options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of exclusive mode rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, all the mode rules are not re-appliedand any fine-tuning is preserved. To reset mode behavior, one can switch between 'safe' and the desired mode. Thisway the new child experiment will use the default system resources for the chosen mode. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Exclusive level of access to node resources: . : Set the exclusive mode config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "exclusive_mode", + "output": "exclusive mode config.toml: safe: assume might be running another experiment on same nodemoderate: assume not running any other experiments or tasks on same node, but still only use physical core countmax: assume not running anything else on node at all except the experimentIf multinode is enabled, this option has no effect, unless worker_remote_processors=1 when it will still be applied.Each exclusive mode can be chosen, and then fine-tuned using each expert settings. Changing theexclusive mode will reset all exclusive mode related options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of exclusive mode rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, all the mode rules are not re-appliedand any fine-tuning is preserved. To reset mode behavior, one can switch between 'safe' and the desired mode. Thisway the new child experiment will use the default system resources for the chosen mode. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "exclusive_mode", + "output": "exclusive mode config.toml: Exclusive level of access to node resources: safe: assume might be running another experiment on same nodemoderate: assume not running any other experiments or tasks on same node, but still only use physical core countmax: assume not running anything else on node at all except the experimentIf multinode is enabled, this option has no effect, unless worker_remote_processors=1 when it will still be applied.Each exclusive mode can be chosen, and then fine-tuned using each expert settings. Changing theexclusive mode will reset all exclusive mode related options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of exclusive mode rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, all the mode rules are not re-appliedand any fine-tuning is preserved. To reset mode behavior, one can switch between 'safe' and the desired mode. Thisway the new child experiment will use the default system resources for the chosen mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "exclusive mode", + "output": "exclusive mode config.toml: Exclusive level of access to node resources: safe: assume might be running another experiment on same nodemoderate: assume not running any other experiments or tasks on same node, but still only use physical core countmax: assume not running anything else on node at all except the experimentIf multinode is enabled, this option has no effect, unless worker_remote_processors=1 when it will still be applied.Each exclusive mode can be chosen, and then fine-tuned using each expert settings. 
Changing theexclusive mode will reset all exclusive mode related options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of exclusive mode rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, all the mode rules are not re-appliedand any fine-tuning is preserved. To reset mode behavior, one can switch between 'safe' and the desired mode. Thisway the new child experiment will use the default system resources for the chosen mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Exclusive level of access to node resources: ", + "output": "exclusive mode config.toml: Exclusive level of access to node resources: safe: assume might be running another experiment on same nodemoderate: assume not running any other experiments or tasks on same node, but still only use physical core countmax: assume not running anything else on node at all except the experimentIf multinode is enabled, this option has no effect, unless worker_remote_processors=1 when it will still be applied.Each exclusive mode can be chosen, and then fine-tuned using each expert settings. Changing theexclusive mode will reset all exclusive mode related options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of exclusive mode rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, all the mode rules are not re-appliedand any fine-tuning is preserved. To reset mode behavior, one can switch between 'safe' and the desired mode. Thisway the new child experiment will use the default system resources for the chosen mode. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting exclusive_mode", + "output": "exclusive mode config.toml: safe: assume might be running another experiment on same nodemoderate: assume not running any other experiments or tasks on same node, but still only use physical core countmax: assume not running anything else on node at all except the experimentIf multinode is enabled, this option has no effect, unless worker_remote_processors=1 when it will still be applied.Each exclusive mode can be chosen, and then fine-tuned using each expert settings. Changing theexclusive mode will reset all exclusive mode related options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of exclusive mode rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, all the mode rules are not re-appliedand any fine-tuning is preserved. To reset mode behavior, one can switch between 'safe' and the desired mode. Thisway the new child experiment will use the default system resources for the chosen mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting exclusive_mode", + "output": "exclusive mode config.toml: Exclusive level of access to node resources: safe: assume might be running another experiment on same nodemoderate: assume not running any other experiments or tasks on same node, but still only use physical core countmax: assume not running anything else on node at all except the experimentIf multinode is enabled, this option has no effect, unless worker_remote_processors=1 when it will still be applied.Each exclusive mode can be chosen, and then fine-tuned using each expert settings. 
Changing theexclusive mode will reset all exclusive mode related options back to default and then re-apply thespecific rules for the new mode, which will undo any fine-tuning of expert options that are part of exclusive mode rules.If choose to do new/continued/refitted/retrained experiment from parent experiment, all the mode rules are not re-appliedand any fine-tuning is preserved. To reset mode behavior, one can switch between 'safe' and the desired mode. Thisway the new child experiment will use the default system resources for the chosen mode. " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_workers do? : max workers config.toml: Maximum number of workers for Driverless AI server pool (only 1 needed currently)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_workers. : max workers config.toml: Maximum number of workers for Driverless AI server pool (only 1 needed currently)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_workers", + "output": "max workers config.toml: Maximum number of workers for Driverless AI server pool (only 1 needed currently)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_workers", + "output": "max workers config.toml: Maximum number of workers for Driverless AI server pool (only 1 needed currently)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max workers", + "output": "max workers config.toml: Maximum number of workers for Driverless AI server pool (only 1 needed currently)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max workers config.toml: Maximum number of workers for Driverless AI server pool (only 1 needed currently)" + }, + { + "prompt_type": 
"human_bot", + "instruction": "Provide a short explanation of the expert setting max_workers", + "output": "max workers config.toml: Maximum number of workers for Driverless AI server pool (only 1 needed currently)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_workers", + "output": "max workers config.toml: Maximum number of workers for Driverless AI server pool (only 1 needed currently)" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_cores do? : max cores config.toml: Max number of CPU cores to use per experiment. Set to <= 0 to use all (physical) cores.One can also set environment variable 'OMP_NUM_THREADS' to number of cores to use for OpenMP(e.g., in bash: 'export OMP_NUM_THREADS=32' and 'export OPENBLAS_NUM_THREADS=32'). " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_cores. : max cores config.toml: Max number of CPU cores to use per experiment. Set to <= 0 to use all (physical) cores.One can also set environment variable 'OMP_NUM_THREADS' to number of cores to use for OpenMP(e.g., in bash: 'export OMP_NUM_THREADS=32' and 'export OPENBLAS_NUM_THREADS=32'). " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of cores to use (0 = all): . : Set the max cores config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cores", + "output": "max cores config.toml: Max number of CPU cores to use per experiment. Set to <= 0 to use all (physical) cores.One can also set environment variable 'OMP_NUM_THREADS' to number of cores to use for OpenMP(e.g., in bash: 'export OMP_NUM_THREADS=32' and 'export OPENBLAS_NUM_THREADS=32'). 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cores", + "output": "max cores config.toml: Number of cores to use (0 = all): Max number of CPU cores to use per experiment. Set to <= 0 to use all (physical) cores.One can also set environment variable 'OMP_NUM_THREADS' to number of cores to use for OpenMP(e.g., in bash: 'export OMP_NUM_THREADS=32' and 'export OPENBLAS_NUM_THREADS=32'). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max cores", + "output": "max cores config.toml: Number of cores to use (0 = all): Max number of CPU cores to use per experiment. Set to <= 0 to use all (physical) cores.One can also set environment variable 'OMP_NUM_THREADS' to number of cores to use for OpenMP(e.g., in bash: 'export OMP_NUM_THREADS=32' and 'export OPENBLAS_NUM_THREADS=32'). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of cores to use (0 = all): ", + "output": "max cores config.toml: Number of cores to use (0 = all): Max number of CPU cores to use per experiment. Set to <= 0 to use all (physical) cores.One can also set environment variable 'OMP_NUM_THREADS' to number of cores to use for OpenMP(e.g., in bash: 'export OMP_NUM_THREADS=32' and 'export OPENBLAS_NUM_THREADS=32'). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_cores", + "output": "max cores config.toml: Max number of CPU cores to use per experiment. Set to <= 0 to use all (physical) cores.One can also set environment variable 'OMP_NUM_THREADS' to number of cores to use for OpenMP(e.g., in bash: 'export OMP_NUM_THREADS=32' and 'export OPENBLAS_NUM_THREADS=32'). 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_cores", + "output": "max cores config.toml: Number of cores to use (0 = all): Max number of CPU cores to use per experiment. Set to <= 0 to use all (physical) cores.One can also set environment variable 'OMP_NUM_THREADS' to number of cores to use for OpenMP(e.g., in bash: 'export OMP_NUM_THREADS=32' and 'export OPENBLAS_NUM_THREADS=32'). " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_cores_dai do? : max cores dai config.toml: Max number of CPU cores to use across all of DAI experiments and tasks.-1 is all available, with stall_subprocess_submission_dai_fork_threshold_count=0 means restricted to core count. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_cores_dai. : max cores dai config.toml: Max number of CPU cores to use across all of DAI experiments and tasks.-1 is all available, with stall_subprocess_submission_dai_fork_threshold_count=0 means restricted to core count. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cores_dai", + "output": "max cores dai config.toml: Max number of CPU cores to use across all of DAI experiments and tasks.-1 is all available, with stall_subprocess_submission_dai_fork_threshold_count=0 means restricted to core count. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cores_dai", + "output": "max cores dai config.toml: Max number of CPU cores to use across all of DAI experiments and tasks.-1 is all available, with stall_subprocess_submission_dai_fork_threshold_count=0 means restricted to core count. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max cores dai", + "output": "max cores dai config.toml: Max number of CPU cores to use across all of DAI experiments and tasks.-1 is all available, with stall_subprocess_submission_dai_fork_threshold_count=0 means restricted to core count. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max cores dai config.toml: Max number of CPU cores to use across all of DAI experiments and tasks.-1 is all available, with stall_subprocess_submission_dai_fork_threshold_count=0 means restricted to core count. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_cores_dai", + "output": "max cores dai config.toml: Max number of CPU cores to use across all of DAI experiments and tasks.-1 is all available, with stall_subprocess_submission_dai_fork_threshold_count=0 means restricted to core count. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_cores_dai", + "output": "max cores dai config.toml: Max number of CPU cores to use across all of DAI experiments and tasks.-1 is all available, with stall_subprocess_submission_dai_fork_threshold_count=0 means restricted to core count. " + }, + { + "prompt_type": "plain", + "instruction": ": What does virtual_cores_per_physical_core do? : virtual cores per physical core config.toml: Number of virtual cores per physical core (0: auto mode, >=1 use that integer value). If >=1, the reported physical cores in logs will match the virtual cores divided by this value." + }, + { + "prompt_type": "plain", + "instruction": ": Explain virtual_cores_per_physical_core. : virtual cores per physical core config.toml: Number of virtual cores per physical core (0: auto mode, >=1 use that integer value). 
If >=1, the reported physical cores in logs will match the virtual cores divided by this value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "virtual_cores_per_physical_core", + "output": "virtual cores per physical core config.toml: Number of virtual cores per physical core (0: auto mode, >=1 use that integer value). If >=1, the reported physical cores in logs will match the virtual cores divided by this value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "virtual_cores_per_physical_core", + "output": "virtual cores per physical core config.toml: Number of virtual cores per physical core (0: auto mode, >=1 use that integer value). If >=1, the reported physical cores in logs will match the virtual cores divided by this value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "virtual cores per physical core", + "output": "virtual cores per physical core config.toml: Number of virtual cores per physical core (0: auto mode, >=1 use that integer value). If >=1, the reported physical cores in logs will match the virtual cores divided by this value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "virtual cores per physical core config.toml: Number of virtual cores per physical core (0: auto mode, >=1 use that integer value). If >=1, the reported physical cores in logs will match the virtual cores divided by this value." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting virtual_cores_per_physical_core", + "output": "virtual cores per physical core config.toml: Number of virtual cores per physical core (0: auto mode, >=1 use that integer value). 
If >=1, the reported physical cores in logs will match the virtual cores divided by this value." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting virtual_cores_per_physical_core", + "output": "virtual cores per physical core config.toml: Number of virtual cores per physical core (0: auto mode, >=1 use that integer value). If >=1, the reported physical cores in logs will match the virtual cores divided by this value." + }, + { + "prompt_type": "plain", + "instruction": ": What does min_virtual_cores_per_physical_core_if_unequal do? : min virtual cores per physical core if unequal config.toml: Minimum number of virtual cores per physical core. Only applies if virtual cores != physical cores. Can help situations like Intel i9 13900 with 24 physical cores and only 32 virtual cores. So better to limit physical cores to 16." + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_virtual_cores_per_physical_core_if_unequal. : min virtual cores per physical core if unequal config.toml: Minimum number of virtual cores per physical core. Only applies if virtual cores != physical cores. Can help situations like Intel i9 13900 with 24 physical cores and only 32 virtual cores. So better to limit physical cores to 16." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_virtual_cores_per_physical_core_if_unequal", + "output": "min virtual cores per physical core if unequal config.toml: Minimum number of virtual cores per physical core. Only applies if virtual cores != physical cores. Can help situations like Intel i9 13900 with 24 physical cores and only 32 virtual cores. So better to limit physical cores to 16." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_virtual_cores_per_physical_core_if_unequal", + "output": "min virtual cores per physical core if unequal config.toml: Minimum number of virtual cores per physical core. Only applies if virtual cores != physical cores. Can help situations like Intel i9 13900 with 24 physical cores and only 32 virtual cores. So better to limit physical cores to 16." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min virtual cores per physical core if unequal", + "output": "min virtual cores per physical core if unequal config.toml: Minimum number of virtual cores per physical core. Only applies if virtual cores != physical cores. Can help situations like Intel i9 13900 with 24 physical cores and only 32 virtual cores. So better to limit physical cores to 16." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "min virtual cores per physical core if unequal config.toml: Minimum number of virtual cores per physical core. Only applies if virtual cores != physical cores. Can help situations like Intel i9 13900 with 24 physical cores and only 32 virtual cores. So better to limit physical cores to 16." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_virtual_cores_per_physical_core_if_unequal", + "output": "min virtual cores per physical core if unequal config.toml: Minimum number of virtual cores per physical core. Only applies if virtual cores != physical cores. Can help situations like Intel i9 13900 with 24 physical cores and only 32 virtual cores. So better to limit physical cores to 16." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_virtual_cores_per_physical_core_if_unequal", + "output": "min virtual cores per physical core if unequal config.toml: Minimum number of virtual cores per physical core. Only applies if virtual cores != physical cores. Can help situations like Intel i9 13900 with 24 physical cores and only 32 virtual cores. So better to limit physical cores to 16." + }, + { + "prompt_type": "plain", + "instruction": ": What does override_physical_cores do? : override physical cores config.toml: Number of physical cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out physical cores correctly, one can override with this value. Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain override_physical_cores. : override physical cores config.toml: Number of physical cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out physical cores correctly, one can override with this value. Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override_physical_cores", + "output": "override physical cores config.toml: Number of physical cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out physical cores correctly, one can override with this value. Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override_physical_cores", + "output": "override physical cores config.toml: Number of physical cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out physical cores correctly, one can override with this value. Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override physical cores", + "output": "override physical cores config.toml: Number of physical cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out physical cores correctly, one can override with this value. Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "override physical cores config.toml: Number of physical cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out physical cores correctly, one can override with this value. Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting override_physical_cores", + "output": "override physical cores config.toml: Number of physical cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out physical cores correctly, one can override with this value. 
Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting override_physical_cores", + "output": "override physical cores config.toml: Number of physical cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out physical cores correctly, one can override with this value. Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "plain", + "instruction": ": What does override_virtual_cores do? : override virtual cores config.toml: Number of virtual cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out virtual cores correctly, or only a portion of the system is to be used, one can override with this value. Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain override_virtual_cores. : override virtual cores config.toml: Number of virtual cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out virtual cores correctly, or only a portion of the system is to be used, one can override with this value. Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override_virtual_cores", + "output": "override virtual cores config.toml: Number of virtual cores to assume are present (0: auto, >=1 use that integer value). 
If for some reason DAI does not automatically figure out virtual cores correctly, or only a portion of the system is to be used, one can override with this value. Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override_virtual_cores", + "output": "override virtual cores config.toml: Number of virtual cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out virtual cores correctly, or only a portion of the system is to be used, one can override with this value. Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override virtual cores", + "output": "override virtual cores config.toml: Number of virtual cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out virtual cores correctly, or only a portion of the system is to be used, one can override with this value. Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "override virtual cores config.toml: Number of virtual cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out virtual cores correctly, or only a portion of the system is to be used, one can override with this value. 
Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting override_virtual_cores", + "output": "override virtual cores config.toml: Number of virtual cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out virtual cores correctly, or only a portion of the system is to be used, one can override with this value. Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting override_virtual_cores", + "output": "override virtual cores config.toml: Number of virtual cores to assume are present (0: auto, >=1 use that integer value). If for some reason DAI does not automatically figure out virtual cores correctly, or only a portion of the system is to be used, one can override with this value. Some systems, especially virtualized, do not always provide correct information about the virtual cores, physical cores, sockets, etc." + }, + { + "prompt_type": "plain", + "instruction": ": What does small_data_recipe_work do? : small data recipe work config.toml: Whether to treat data as small recipe in terms of work, by spreading many small tasks across many cores instead of forcing GPUs, for models that support it via static var _use_single_core_if_many. 'auto' looks at _use_single_core_if_many for models and data size, 'on' forces, 'off' disables." + }, + { + "prompt_type": "plain", + "instruction": ": Explain small_data_recipe_work. 
: small data recipe work config.toml: Whether to treat data as small recipe in terms of work, by spreading many small tasks across many cores instead of forcing GPUs, for models that support it via static var _use_single_core_if_many. 'auto' looks at _use_single_core_if_many for models and data size, 'on' forces, 'off' disables." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Small data work: . : Set the small data recipe work config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "small_data_recipe_work", + "output": "small data recipe work config.toml: Whether to treat data as small recipe in terms of work, by spreading many small tasks across many cores instead of forcing GPUs, for models that support it via static var _use_single_core_if_many. 'auto' looks at _use_single_core_if_many for models and data size, 'on' forces, 'off' disables." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "small_data_recipe_work", + "output": "small data recipe work config.toml: Small data work: Whether to treat data as small recipe in terms of work, by spreading many small tasks across many cores instead of forcing GPUs, for models that support it via static var _use_single_core_if_many. 'auto' looks at _use_single_core_if_many for models and data size, 'on' forces, 'off' disables." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "small data recipe work", + "output": "small data recipe work config.toml: Small data work: Whether to treat data as small recipe in terms of work, by spreading many small tasks across many cores instead of forcing GPUs, for models that support it via static var _use_single_core_if_many. 'auto' looks at _use_single_core_if_many for models and data size, 'on' forces, 'off' disables." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Small data work: ", + "output": "small data recipe work config.toml: Small data work: Whether to treat data as small recipe in terms of work, by spreading many small tasks across many cores instead of forcing GPUs, for models that support it via static var _use_single_core_if_many. 'auto' looks at _use_single_core_if_many for models and data size, 'on' forces, 'off' disables." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting small_data_recipe_work", + "output": "small data recipe work config.toml: Whether to treat data as small recipe in terms of work, by spreading many small tasks across many cores instead of forcing GPUs, for models that support it via static var _use_single_core_if_many. 'auto' looks at _use_single_core_if_many for models and data size, 'on' forces, 'off' disables." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting small_data_recipe_work", + "output": "small data recipe work config.toml: Small data work: Whether to treat data as small recipe in terms of work, by spreading many small tasks across many cores instead of forcing GPUs, for models that support it via static var _use_single_core_if_many. 'auto' looks at _use_single_core_if_many for models and data size, 'on' forces, 'off' disables." + }, + { + "prompt_type": "plain", + "instruction": ": What does stall_subprocess_submission_dai_fork_threshold_count do? : stall subprocess submission dai fork threshold count config.toml: Stall submission of tasks if total DAI fork count exceeds count (-1 to disable, 0 for automatic of max_cores_dai)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain stall_subprocess_submission_dai_fork_threshold_count. 
: stall subprocess submission dai fork threshold count config.toml: Stall submission of tasks if total DAI fork count exceeds count (-1 to disable, 0 for automatic of max_cores_dai)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall_subprocess_submission_dai_fork_threshold_count", + "output": "stall subprocess submission dai fork threshold count config.toml: Stall submission of tasks if total DAI fork count exceeds count (-1 to disable, 0 for automatic of max_cores_dai)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall_subprocess_submission_dai_fork_threshold_count", + "output": "stall subprocess submission dai fork threshold count config.toml: Stall submission of tasks if total DAI fork count exceeds count (-1 to disable, 0 for automatic of max_cores_dai)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall subprocess submission dai fork threshold count", + "output": "stall subprocess submission dai fork threshold count config.toml: Stall submission of tasks if total DAI fork count exceeds count (-1 to disable, 0 for automatic of max_cores_dai)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "stall subprocess submission dai fork threshold count config.toml: Stall submission of tasks if total DAI fork count exceeds count (-1 to disable, 0 for automatic of max_cores_dai)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting stall_subprocess_submission_dai_fork_threshold_count", + "output": "stall subprocess submission dai fork threshold count config.toml: Stall submission of tasks if total DAI fork count exceeds count (-1 to disable, 0 for automatic of max_cores_dai)" + }, + { + 
"prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting stall_subprocess_submission_dai_fork_threshold_count", + "output": "stall subprocess submission dai fork threshold count config.toml: Stall submission of tasks if total DAI fork count exceeds count (-1 to disable, 0 for automatic of max_cores_dai)" + }, + { + "prompt_type": "plain", + "instruction": ": What does stall_subprocess_submission_mem_threshold_pct do? : stall subprocess submission mem threshold pct config.toml: Stall submission of tasks if system memory available is less than this threshold in percent (set to 0 to disable). Above this threshold, the number of workers in any pool of workers is linearly reduced down to 1 once hitting this threshold. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain stall_subprocess_submission_mem_threshold_pct. : stall subprocess submission mem threshold pct config.toml: Stall submission of tasks if system memory available is less than this threshold in percent (set to 0 to disable). Above this threshold, the number of workers in any pool of workers is linearly reduced down to 1 once hitting this threshold. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall_subprocess_submission_mem_threshold_pct", + "output": "stall subprocess submission mem threshold pct config.toml: Stall submission of tasks if system memory available is less than this threshold in percent (set to 0 to disable). Above this threshold, the number of workers in any pool of workers is linearly reduced down to 1 once hitting this threshold. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall_subprocess_submission_mem_threshold_pct", + "output": "stall subprocess submission mem threshold pct config.toml: Stall submission of tasks if system memory available is less than this threshold in percent (set to 0 to disable). Above this threshold, the number of workers in any pool of workers is linearly reduced down to 1 once hitting this threshold. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall subprocess submission mem threshold pct", + "output": "stall subprocess submission mem threshold pct config.toml: Stall submission of tasks if system memory available is less than this threshold in percent (set to 0 to disable). Above this threshold, the number of workers in any pool of workers is linearly reduced down to 1 once hitting this threshold. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "stall subprocess submission mem threshold pct config.toml: Stall submission of tasks if system memory available is less than this threshold in percent (set to 0 to disable). Above this threshold, the number of workers in any pool of workers is linearly reduced down to 1 once hitting this threshold. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting stall_subprocess_submission_mem_threshold_pct", + "output": "stall subprocess submission mem threshold pct config.toml: Stall submission of tasks if system memory available is less than this threshold in percent (set to 0 to disable). Above this threshold, the number of workers in any pool of workers is linearly reduced down to 1 once hitting this threshold. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting stall_subprocess_submission_mem_threshold_pct", + "output": "stall subprocess submission mem threshold pct config.toml: Stall submission of tasks if system memory available is less than this threshold in percent (set to 0 to disable). Above this threshold, the number of workers in any pool of workers is linearly reduced down to 1 once hitting this threshold. " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_cores_by_physical do? : max cores by physical config.toml: Whether to set automatic number of cores by physical (True) or logical (False) count. Using all logical cores can lead to poor performance due to cache thrashing. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_cores_by_physical. : max cores by physical config.toml: Whether to set automatic number of cores by physical (True) or logical (False) count. Using all logical cores can lead to poor performance due to cache thrashing. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cores_by_physical", + "output": "max cores by physical config.toml: Whether to set automatic number of cores by physical (True) or logical (False) count. Using all logical cores can lead to poor performance due to cache thrashing. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cores_by_physical", + "output": "max cores by physical config.toml: Whether to set automatic number of cores by physical (True) or logical (False) count. Using all logical cores can lead to poor performance due to cache thrashing. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max cores by physical", + "output": "max cores by physical config.toml: Whether to set automatic number of cores by physical (True) or logical (False) count. Using all logical cores can lead to poor performance due to cache thrashing. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max cores by physical config.toml: Whether to set automatic number of cores by physical (True) or logical (False) count. Using all logical cores can lead to poor performance due to cache thrashing. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_cores_by_physical", + "output": "max cores by physical config.toml: Whether to set automatic number of cores by physical (True) or logical (False) count. Using all logical cores can lead to poor performance due to cache thrashing. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_cores_by_physical", + "output": "max cores by physical config.toml: Whether to set automatic number of cores by physical (True) or logical (False) count. Using all logical cores can lead to poor performance due to cache thrashing. " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_cores_limit do? : max cores limit config.toml: Absolute limit to core count" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_cores_limit. 
: max cores limit config.toml: Absolute limit to core count" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cores_limit", + "output": "max cores limit config.toml: Absolute limit to core count" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cores_limit", + "output": "max cores limit config.toml: Absolute limit to core count" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max cores limit", + "output": "max cores limit config.toml: Absolute limit to core count" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max cores limit config.toml: Absolute limit to core count" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_cores_limit", + "output": "max cores limit config.toml: Absolute limit to core count" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_cores_limit", + "output": "max cores limit config.toml: Absolute limit to core count" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_fit_cores do? : max fit cores config.toml: Control maximum number of cores to use for a model's fit call (0 = all physical cores >= 1 that count). See also tensorflow_model_max_cores to further limit TensorFlow main models." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_fit_cores. : max fit cores config.toml: Control maximum number of cores to use for a model's fit call (0 = all physical cores >= 1 that count). See also tensorflow_model_max_cores to further limit TensorFlow main models." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum number of cores to use for model fit: . : Set the max fit cores config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_fit_cores", + "output": "max fit cores config.toml: Control maximum number of cores to use for a model's fit call (0 = all physical cores >= 1 that count). See also tensorflow_model_max_cores to further limit TensorFlow main models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_fit_cores", + "output": "max fit cores config.toml: Maximum number of cores to use for model fit: Control maximum number of cores to use for a model's fit call (0 = all physical cores >= 1 that count). See also tensorflow_model_max_cores to further limit TensorFlow main models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max fit cores", + "output": "max fit cores config.toml: Maximum number of cores to use for model fit: Control maximum number of cores to use for a model's fit call (0 = all physical cores >= 1 that count). See also tensorflow_model_max_cores to further limit TensorFlow main models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of cores to use for model fit: ", + "output": "max fit cores config.toml: Maximum number of cores to use for model fit: Control maximum number of cores to use for a model's fit call (0 = all physical cores >= 1 that count). See also tensorflow_model_max_cores to further limit TensorFlow main models." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_fit_cores", + "output": "max fit cores config.toml: Control maximum number of cores to use for a model's fit call (0 = all physical cores >= 1 that count). See also tensorflow_model_max_cores to further limit TensorFlow main models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_fit_cores", + "output": "max fit cores config.toml: Maximum number of cores to use for model fit: Control maximum number of cores to use for a model's fit call (0 = all physical cores >= 1 that count). See also tensorflow_model_max_cores to further limit TensorFlow main models." + }, + { + "prompt_type": "plain", + "instruction": ": What does parallel_score_max_workers do? : parallel score max workers config.toml: Control maximum number of cores to use for a scoring across all chosen scorers (0 = auto)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain parallel_score_max_workers. : parallel score max workers config.toml: Control maximum number of cores to use for a scoring across all chosen scorers (0 = auto)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum number of cores to use for model parallel scoring: . 
: Set the parallel score max workers config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "parallel_score_max_workers", + "output": "parallel score max workers config.toml: Control maximum number of cores to use for a scoring across all chosen scorers (0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "parallel_score_max_workers", + "output": "parallel score max workers config.toml: Maximum number of cores to use for model parallel scoring: Control maximum number of cores to use for a scoring across all chosen scorers (0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "parallel score max workers", + "output": "parallel score max workers config.toml: Maximum number of cores to use for model parallel scoring: Control maximum number of cores to use for a scoring across all chosen scorers (0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of cores to use for model parallel scoring: ", + "output": "parallel score max workers config.toml: Maximum number of cores to use for model parallel scoring: Control maximum number of cores to use for a scoring across all chosen scorers (0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting parallel_score_max_workers", + "output": "parallel score max workers config.toml: Control maximum number of cores to use for a scoring across all chosen scorers (0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting parallel_score_max_workers", + "output": "parallel score max workers config.toml: Maximum number of cores to use for model parallel scoring: Control 
maximum number of cores to use for a scoring across all chosen scorers (0 = auto)" + }, + { + "prompt_type": "plain", + "instruction": ": What does use_dask_cluster do? : use dask cluster config.toml: Whether to use full multinode distributed cluster (True) or single-node dask (False). In some cases, using entire cluster can be inefficient. E.g. several DGX nodes can be more efficient if used one DGX at a time for medium-sized data. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain use_dask_cluster. : use dask cluster config.toml: Whether to use full multinode distributed cluster (True) or single-node dask (False). In some cases, using entire cluster can be inefficient. E.g. several DGX nodes can be more efficient if used one DGX at a time for medium-sized data. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: If full dask cluster is enabled, use full cluster: . : Set the use dask cluster config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_dask_cluster", + "output": "use dask cluster config.toml: Whether to use full multinode distributed cluster (True) or single-node dask (False). In some cases, using entire cluster can be inefficient. E.g. several DGX nodes can be more efficient if used one DGX at a time for medium-sized data. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_dask_cluster", + "output": "use dask cluster config.toml: If full dask cluster is enabled, use full cluster: Whether to use full multinode distributed cluster (True) or single-node dask (False). In some cases, using entire cluster can be inefficient. E.g. several DGX nodes can be more efficient if used one DGX at a time for medium-sized data. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use dask cluster", + "output": "use dask cluster config.toml: If full dask cluster is enabled, use full cluster: Whether to use full multinode distributed cluster (True) or single-node dask (False). In some cases, using entire cluster can be inefficient. E.g. several DGX nodes can be more efficient if used one DGX at a time for medium-sized data. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "If full dask cluster is enabled, use full cluster: ", + "output": "use dask cluster config.toml: If full dask cluster is enabled, use full cluster: Whether to use full multinode distributed cluster (True) or single-node dask (False). In some cases, using entire cluster can be inefficient. E.g. several DGX nodes can be more efficient if used one DGX at a time for medium-sized data. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting use_dask_cluster", + "output": "use dask cluster config.toml: Whether to use full multinode distributed cluster (True) or single-node dask (False). In some cases, using entire cluster can be inefficient. E.g. several DGX nodes can be more efficient if used one DGX at a time for medium-sized data. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting use_dask_cluster", + "output": "use dask cluster config.toml: If full dask cluster is enabled, use full cluster: Whether to use full multinode distributed cluster (True) or single-node dask (False). In some cases, using entire cluster can be inefficient. E.g. several DGX nodes can be more efficient if used one DGX at a time for medium-sized data. " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_predict_cores do? 
: max predict cores config.toml: Control maximum number of cores to use for a model's predict call (0 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_predict_cores. : max predict cores config.toml: Control maximum number of cores to use for a model's predict call (0 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum number of cores to use for model predict: . : Set the max predict cores config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_predict_cores", + "output": "max predict cores config.toml: Control maximum number of cores to use for a model's predict call (0 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_predict_cores", + "output": "max predict cores config.toml: Maximum number of cores to use for model predict: Control maximum number of cores to use for a model's predict call (0 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max predict cores", + "output": "max predict cores config.toml: Maximum number of cores to use for model predict: Control maximum number of cores to use for a model's predict call (0 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of cores to use for model predict: ", + "output": "max predict cores config.toml: Maximum number of cores to use for model predict: Control maximum number of cores to use for a model's predict call (0 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the 
expert setting max_predict_cores", + "output": "max predict cores config.toml: Control maximum number of cores to use for a model's predict call (0 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_predict_cores", + "output": "max predict cores config.toml: Maximum number of cores to use for model predict: Control maximum number of cores to use for a model's predict call (0 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_predict_cores_in_dai_reduce_factor do? : max predict cores in dai reduce factor config.toml: Factor by which to reduce physical cores, to use for post-model experiment tasks like autoreport, MLI, etc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_predict_cores_in_dai_reduce_factor. : max predict cores in dai reduce factor config.toml: Factor by which to reduce physical cores, to use for post-model experiment tasks like autoreport, MLI, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_predict_cores_in_dai_reduce_factor", + "output": "max predict cores in dai reduce factor config.toml: Factor by which to reduce physical cores, to use for post-model experiment tasks like autoreport, MLI, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_predict_cores_in_dai_reduce_factor", + "output": "max predict cores in dai reduce factor config.toml: Factor by which to reduce physical cores, to use for post-model experiment tasks like autoreport, MLI, etc." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max predict cores in dai reduce factor", + "output": "max predict cores in dai reduce factor config.toml: Factor by which to reduce physical cores, to use for post-model experiment tasks like autoreport, MLI, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max predict cores in dai reduce factor config.toml: Factor by which to reduce physical cores, to use for post-model experiment tasks like autoreport, MLI, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_predict_cores_in_dai_reduce_factor", + "output": "max predict cores in dai reduce factor config.toml: Factor by which to reduce physical cores, to use for post-model experiment tasks like autoreport, MLI, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_predict_cores_in_dai_reduce_factor", + "output": "max predict cores in dai reduce factor config.toml: Factor by which to reduce physical cores, to use for post-model experiment tasks like autoreport, MLI, etc." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_max_predict_cores_in_dai do? : max max predict cores in dai config.toml: Maximum number of cores to use for post-model experiment tasks like autoreport, MLI, etc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_max_predict_cores_in_dai. : max max predict cores in dai config.toml: Maximum number of cores to use for post-model experiment tasks like autoreport, MLI, etc." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_max_predict_cores_in_dai", + "output": "max max predict cores in dai config.toml: Maximum number of cores to use for post-model experiment tasks like autoreport, MLI, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_max_predict_cores_in_dai", + "output": "max max predict cores in dai config.toml: Maximum number of cores to use for post-model experiment tasks like autoreport, MLI, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max max predict cores in dai", + "output": "max max predict cores in dai config.toml: Maximum number of cores to use for post-model experiment tasks like autoreport, MLI, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max max predict cores in dai config.toml: Maximum number of cores to use for post-model experiment tasks like autoreport, MLI, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_max_predict_cores_in_dai", + "output": "max max predict cores in dai config.toml: Maximum number of cores to use for post-model experiment tasks like autoreport, MLI, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_max_predict_cores_in_dai", + "output": "max max predict cores in dai config.toml: Maximum number of cores to use for post-model experiment tasks like autoreport, MLI, etc." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_predict_cores_in_dai do? 
: max predict cores in dai config.toml: Control maximum number of cores to use for a model's transform and predict call when doing operations inside DAI-MLI GUI and R/Py client. The main experiment and other tasks like MLI and autoreport have separate queues. The main experiments have run at most worker_remote_processors tasks (limited by cores if auto mode), while other tasks run at most worker_local_processors (limited by cores if auto mode) tasks at the same time, so many small tasks can add up. To prevent overloading the system, the defaults are conservative. However, if most of the activity involves autoreport or MLI, and no model experiments are running, it may be safe to increase this value to something larger than 4. -1 : Auto mode. Up to physical cores divided by 4, up to maximum of 10. 0 : all physical cores >= 1: that count). " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_predict_cores_in_dai. : max predict cores in dai config.toml: Control maximum number of cores to use for a model's transform and predict call when doing operations inside DAI-MLI GUI and R/Py client. The main experiment and other tasks like MLI and autoreport have separate queues. The main experiments have run at most worker_remote_processors tasks (limited by cores if auto mode), while other tasks run at most worker_local_processors (limited by cores if auto mode) tasks at the same time, so many small tasks can add up. To prevent overloading the system, the defaults are conservative. However, if most of the activity involves autoreport or MLI, and no model experiments are running, it may be safe to increase this value to something larger than 4. -1 : Auto mode. Up to physical cores divided by 4, up to maximum of 10. 0 : all physical cores >= 1: that count). " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum number of cores to use for model transform and predict when doing MLI and AutoDoc.: . 
: Set the max predict cores in dai config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_predict_cores_in_dai", + "output": "max predict cores in dai config.toml: Control maximum number of cores to use for a model's transform and predict call when doing operations inside DAI-MLI GUI and R/Py client. The main experiment and other tasks like MLI and autoreport have separate queues. The main experiments have run at most worker_remote_processors tasks (limited by cores if auto mode), while other tasks run at most worker_local_processors (limited by cores if auto mode) tasks at the same time, so many small tasks can add up. To prevent overloading the system, the defaults are conservative. However, if most of the activity involves autoreport or MLI, and no model experiments are running, it may be safe to increase this value to something larger than 4. -1 : Auto mode. Up to physical cores divided by 4, up to maximum of 10. 0 : all physical cores >= 1: that count). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_predict_cores_in_dai", + "output": "max predict cores in dai config.toml: Maximum number of cores to use for model transform and predict when doing MLI and AutoDoc.: Control maximum number of cores to use for a model's transform and predict call when doing operations inside DAI-MLI GUI and R/Py client. The main experiment and other tasks like MLI and autoreport have separate queues. The main experiments have run at most worker_remote_processors tasks (limited by cores if auto mode), while other tasks run at most worker_local_processors (limited by cores if auto mode) tasks at the same time, so many small tasks can add up. To prevent overloading the system, the defaults are conservative. 
However, if most of the activity involves autoreport or MLI, and no model experiments are running, it may be safe to increase this value to something larger than 4. -1 : Auto mode. Up to physical cores divided by 4, up to maximum of 10. 0 : all physical cores >= 1: that count). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max predict cores in dai", + "output": "max predict cores in dai config.toml: Maximum number of cores to use for model transform and predict when doing MLI and AutoDoc.: Control maximum number of cores to use for a model's transform and predict call when doing operations inside DAI-MLI GUI and R/Py client. The main experiment and other tasks like MLI and autoreport have separate queues. The main experiments have run at most worker_remote_processors tasks (limited by cores if auto mode), while other tasks run at most worker_local_processors (limited by cores if auto mode) tasks at the same time, so many small tasks can add up. To prevent overloading the system, the defaults are conservative. However, if most of the activity involves autoreport or MLI, and no model experiments are running, it may be safe to increase this value to something larger than 4. -1 : Auto mode. Up to physical cores divided by 4, up to maximum of 10. 0 : all physical cores >= 1: that count). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of cores to use for model transform and predict when doing MLI and AutoDoc.: ", + "output": "max predict cores in dai config.toml: Maximum number of cores to use for model transform and predict when doing MLI and AutoDoc.: Control maximum number of cores to use for a model's transform and predict call when doing operations inside DAI-MLI GUI and R/Py client. The main experiment and other tasks like MLI and autoreport have separate queues. 
The main experiments have run at most worker_remote_processors tasks (limited by cores if auto mode), while other tasks run at most worker_local_processors (limited by cores if auto mode) tasks at the same time, so many small tasks can add up. To prevent overloading the system, the defaults are conservative. However, if most of the activity involves autoreport or MLI, and no model experiments are running, it may be safe to increase this value to something larger than 4. -1 : Auto mode. Up to physical cores divided by 4, up to maximum of 10. 0 : all physical cores >= 1: that count). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_predict_cores_in_dai", + "output": "max predict cores in dai config.toml: Control maximum number of cores to use for a model's transform and predict call when doing operations inside DAI-MLI GUI and R/Py client. The main experiment and other tasks like MLI and autoreport have separate queues. The main experiments have run at most worker_remote_processors tasks (limited by cores if auto mode), while other tasks run at most worker_local_processors (limited by cores if auto mode) tasks at the same time, so many small tasks can add up. To prevent overloading the system, the defaults are conservative. However, if most of the activity involves autoreport or MLI, and no model experiments are running, it may be safe to increase this value to something larger than 4. -1 : Auto mode. Up to physical cores divided by 4, up to maximum of 10. 0 : all physical cores >= 1: that count). 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_predict_cores_in_dai", + "output": "max predict cores in dai config.toml: Maximum number of cores to use for model transform and predict when doing MLI and AutoDoc.: Control maximum number of cores to use for a model's transform and predict call when doing operations inside DAI-MLI GUI and R/Py client. The main experiment and other tasks like MLI and autoreport have separate queues. The main experiments have run at most worker_remote_processors tasks (limited by cores if auto mode), while other tasks run at most worker_local_processors (limited by cores if auto mode) tasks at the same time, so many small tasks can add up. To prevent overloading the system, the defaults are conservative. However, if most of the activity involves autoreport or MLI, and no model experiments are running, it may be safe to increase this value to something larger than 4. -1 : Auto mode. Up to physical cores divided by 4, up to maximum of 10. 0 : all physical cores >= 1: that count). " + }, + { + "prompt_type": "plain", + "instruction": ": What does batch_cpu_tuning_max_workers do? : batch cpu tuning max workers config.toml: Control number of workers used in CPU mode for tuning (0 = socket count -1 = all physical cores >= 1 that count). More workers will be more parallel but models learn less from each other." + }, + { + "prompt_type": "plain", + "instruction": ": Explain batch_cpu_tuning_max_workers. : batch cpu tuning max workers config.toml: Control number of workers used in CPU mode for tuning (0 = socket count -1 = all physical cores >= 1 that count). More workers will be more parallel but models learn less from each other." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Tuning workers per batch for CPU: . 
: Set the batch cpu tuning max workers config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "batch_cpu_tuning_max_workers", + "output": "batch cpu tuning max workers config.toml: Control number of workers used in CPU mode for tuning (0 = socket count -1 = all physical cores >= 1 that count). More workers will be more parallel but models learn less from each other." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "batch_cpu_tuning_max_workers", + "output": "batch cpu tuning max workers config.toml: Tuning workers per batch for CPU: Control number of workers used in CPU mode for tuning (0 = socket count -1 = all physical cores >= 1 that count). More workers will be more parallel but models learn less from each other." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "batch cpu tuning max workers", + "output": "batch cpu tuning max workers config.toml: Tuning workers per batch for CPU: Control number of workers used in CPU mode for tuning (0 = socket count -1 = all physical cores >= 1 that count). More workers will be more parallel but models learn less from each other." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Tuning workers per batch for CPU: ", + "output": "batch cpu tuning max workers config.toml: Tuning workers per batch for CPU: Control number of workers used in CPU mode for tuning (0 = socket count -1 = all physical cores >= 1 that count). More workers will be more parallel but models learn less from each other." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting batch_cpu_tuning_max_workers", + "output": "batch cpu tuning max workers config.toml: Control number of workers used in CPU mode for tuning (0 = socket count -1 = all physical cores >= 1 that count). More workers will be more parallel but models learn less from each other." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting batch_cpu_tuning_max_workers", + "output": "batch cpu tuning max workers config.toml: Tuning workers per batch for CPU: Control number of workers used in CPU mode for tuning (0 = socket count -1 = all physical cores >= 1 that count). More workers will be more parallel but models learn less from each other." + }, + { + "prompt_type": "plain", + "instruction": ": What does cpu_max_workers do? : cpu max workers config.toml: Control number of workers used in CPU mode for training (0 = socket count -1 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain cpu_max_workers. : cpu max workers config.toml: Control number of workers used in CPU mode for training (0 = socket count -1 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Num. workers for CPU training: . : Set the cpu max workers config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cpu_max_workers", + "output": "cpu max workers config.toml: Control number of workers used in CPU mode for training (0 = socket count -1 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cpu_max_workers", + "output": "cpu max workers config.toml: Num. 
workers for CPU training: Control number of workers used in CPU mode for training (0 = socket count -1 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cpu max workers", + "output": "cpu max workers config.toml: Num. workers for CPU training: Control number of workers used in CPU mode for training (0 = socket count -1 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Num. workers for CPU training: ", + "output": "cpu max workers config.toml: Num. workers for CPU training: Control number of workers used in CPU mode for training (0 = socket count -1 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting cpu_max_workers", + "output": "cpu max workers config.toml: Control number of workers used in CPU mode for training (0 = socket count -1 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting cpu_max_workers", + "output": "cpu max workers config.toml: Num. workers for CPU training: Control number of workers used in CPU mode for training (0 = socket count -1 = all physical cores >= 1 that count)" + }, + { + "prompt_type": "plain", + "instruction": ": What does assumed_simultaneous_dt_forks_munging do? : assumed simultaneous dt forks munging config.toml: Expected maximum number of forks, used to ensure datatable doesn't overload system. For actual use beyond this value, system will start to have slow-down issues" + }, + { + "prompt_type": "plain", + "instruction": ": Explain assumed_simultaneous_dt_forks_munging. : assumed simultaneous dt forks munging config.toml: Expected maximum number of forks, used to ensure datatable doesn't overload system. 
For actual use beyond this value, system will start to have slow-down issues" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Assumed/Expected number of munging forks: . : Set the assumed simultaneous dt forks munging config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "assumed_simultaneous_dt_forks_munging", + "output": "assumed simultaneous dt forks munging config.toml: Expected maximum number of forks, used to ensure datatable doesn't overload system. For actual use beyond this value, system will start to have slow-down issues" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "assumed_simultaneous_dt_forks_munging", + "output": "assumed simultaneous dt forks munging config.toml: Assumed/Expected number of munging forks: Expected maximum number of forks, used to ensure datatable doesn't overload system. For actual use beyond this value, system will start to have slow-down issues" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "assumed simultaneous dt forks munging", + "output": "assumed simultaneous dt forks munging config.toml: Assumed/Expected number of munging forks: Expected maximum number of forks, used to ensure datatable doesn't overload system. For actual use beyond this value, system will start to have slow-down issues" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Assumed/Expected number of munging forks: ", + "output": "assumed simultaneous dt forks munging config.toml: Assumed/Expected number of munging forks: Expected maximum number of forks, used to ensure datatable doesn't overload system. 
For actual use beyond this value, system will start to have slow-down issues" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting assumed_simultaneous_dt_forks_munging", + "output": "assumed simultaneous dt forks munging config.toml: Expected maximum number of forks, used to ensure datatable doesn't overload system. For actual use beyond this value, system will start to have slow-down issues" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting assumed_simultaneous_dt_forks_munging", + "output": "assumed simultaneous dt forks munging config.toml: Assumed/Expected number of munging forks: Expected maximum number of forks, used to ensure datatable doesn't overload system. For actual use beyond this value, system will start to have slow-down issues" + }, + { + "prompt_type": "plain", + "instruction": ": What does assumed_simultaneous_dt_forks_stats_openblas do? : assumed simultaneous dt forks stats openblas config.toml: Expected maximum number of forks by computing statistics during ingestion, used to ensure datatable doesn't overload system" + }, + { + "prompt_type": "plain", + "instruction": ": Explain assumed_simultaneous_dt_forks_stats_openblas. 
: assumed simultaneous dt forks stats openblas config.toml: Expected maximum number of forks by computing statistics during ingestion, used to ensure datatable doesn't overload system" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "assumed_simultaneous_dt_forks_stats_openblas", + "output": "assumed simultaneous dt forks stats openblas config.toml: Expected maximum number of forks by computing statistics during ingestion, used to ensure datatable doesn't overload system" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "assumed_simultaneous_dt_forks_stats_openblas", + "output": "assumed simultaneous dt forks stats openblas config.toml: Expected maximum number of forks by computing statistics during ingestion, used to ensure datatable doesn't overload system" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "assumed simultaneous dt forks stats openblas", + "output": "assumed simultaneous dt forks stats openblas config.toml: Expected maximum number of forks by computing statistics during ingestion, used to ensure datatable doesn't overload system" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "assumed simultaneous dt forks stats openblas config.toml: Expected maximum number of forks by computing statistics during ingestion, used to ensure datatable doesn't overload system" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting assumed_simultaneous_dt_forks_stats_openblas", + "output": "assumed simultaneous dt forks stats openblas config.toml: Expected maximum number of forks by computing statistics during ingestion, used to ensure datatable doesn't overload system" + }, + { + "prompt_type": 
"human_bot", + "instruction": "Provide a detailed explanation of the expert setting assumed_simultaneous_dt_forks_stats_openblas", + "output": "assumed simultaneous dt forks stats openblas config.toml: Expected maximum number of forks by computing statistics during ingestion, used to ensure datatable doesn't overload system" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_max_dt_threads_munging do? : max max dt threads munging config.toml: Maximum of threads for datatable for munging" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_max_dt_threads_munging. : max max dt threads munging config.toml: Maximum of threads for datatable for munging" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. threads for datatable munging: . : Set the max max dt threads munging config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_max_dt_threads_munging", + "output": "max max dt threads munging config.toml: Maximum of threads for datatable for munging" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_max_dt_threads_munging", + "output": "max max dt threads munging config.toml: Max. threads for datatable munging: Maximum of threads for datatable for munging" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max max dt threads munging", + "output": "max max dt threads munging config.toml: Max. threads for datatable munging: Maximum of threads for datatable for munging" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. threads for datatable munging: ", + "output": "max max dt threads munging config.toml: Max. 
threads for datatable munging: Maximum of threads for datatable for munging" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_max_dt_threads_munging", + "output": "max max dt threads munging config.toml: Maximum of threads for datatable for munging" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_max_dt_threads_munging", + "output": "max max dt threads munging config.toml: Max. threads for datatable munging: Maximum of threads for datatable for munging" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_max_dt_threads_stats_openblas do? : max max dt threads stats openblas config.toml: Expected maximum of threads for datatable no matter if many more cores" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_max_dt_threads_stats_openblas. : max max dt threads stats openblas config.toml: Expected maximum of threads for datatable no matter if many more cores" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_max_dt_threads_stats_openblas", + "output": "max max dt threads stats openblas config.toml: Expected maximum of threads for datatable no matter if many more cores" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_max_dt_threads_stats_openblas", + "output": "max max dt threads stats openblas config.toml: Expected maximum of threads for datatable no matter if many more cores" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max max dt threads stats openblas", + "output": "max max dt threads stats openblas config.toml: Expected maximum of threads for datatable no matter if many more cores" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert 
setting for Driverless AI", + "input": "", + "output": "max max dt threads stats openblas config.toml: Expected maximum of threads for datatable no matter if many more cores" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_max_dt_threads_stats_openblas", + "output": "max max dt threads stats openblas config.toml: Expected maximum of threads for datatable no matter if many more cores" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_max_dt_threads_stats_openblas", + "output": "max max dt threads stats openblas config.toml: Expected maximum of threads for datatable no matter if many more cores" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_max_dt_threads_readwrite do? : max max dt threads readwrite config.toml: Maximum of threads for datatable for reading/writing files" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_max_dt_threads_readwrite. : max max dt threads readwrite config.toml: Maximum of threads for datatable for reading/writing files" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. threads for datatable reading/writing: . : Set the max max dt threads readwrite config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_max_dt_threads_readwrite", + "output": "max max dt threads readwrite config.toml: Maximum of threads for datatable for reading/writing files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_max_dt_threads_readwrite", + "output": "max max dt threads readwrite config.toml: Max. 
threads for datatable reading/writing: Maximum of threads for datatable for reading/writing files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max max dt threads readwrite", + "output": "max max dt threads readwrite config.toml: Max. threads for datatable reading/writing: Maximum of threads for datatable for reading/writing files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. threads for datatable reading/writing: ", + "output": "max max dt threads readwrite config.toml: Max. threads for datatable reading/writing: Maximum of threads for datatable for reading/writing files" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_max_dt_threads_readwrite", + "output": "max max dt threads readwrite config.toml: Maximum of threads for datatable for reading/writing files" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_max_dt_threads_readwrite", + "output": "max max dt threads readwrite config.toml: Max. threads for datatable reading/writing: Maximum of threads for datatable for reading/writing files" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_workers_final_base_models do? : max workers final base models config.toml: Maximum parallel workers for final model building.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer or model uses more than the expected amount of memory.Ways to reduce final model building memory usage, e.g. 
set one or more of these and retrain final model:1) Increase munging_memory_overhead_factor to 102) Increase final_munging_memory_reduction_factor to 103) Lower max_workers_final_munging to 14) Lower max_workers_final_base_models to 15) Lower max_cores to, e.g., 1/2 or 1/4 of physical cores." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_workers_final_base_models. : max workers final base models config.toml: Maximum parallel workers for final model building.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer or model uses more than the expected amount of memory.Ways to reduce final model building memory usage, e.g. set one or more of these and retrain final model:1) Increase munging_memory_overhead_factor to 102) Increase final_munging_memory_reduction_factor to 103) Lower max_workers_final_munging to 14) Lower max_workers_final_base_models to 15) Lower max_cores to, e.g., 1/2 or 1/4 of physical cores." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. workers for final model building: . : Set the max workers final base models config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_workers_final_base_models", + "output": "max workers final base models config.toml: Maximum parallel workers for final model building.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer or model uses more than the expected amount of memory.Ways to reduce final model building memory usage, e.g. set one or more of these and retrain final model:1) Increase munging_memory_overhead_factor to 102) Increase final_munging_memory_reduction_factor to 103) Lower max_workers_final_munging to 14) Lower max_workers_final_base_models to 15) Lower max_cores to, e.g., 1/2 or 1/4 of physical cores." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_workers_final_base_models", + "output": "max workers final base models config.toml: Max. workers for final model building: Maximum parallel workers for final model building.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer or model uses more than the expected amount of memory.Ways to reduce final model building memory usage, e.g. set one or more of these and retrain final model:1) Increase munging_memory_overhead_factor to 102) Increase final_munging_memory_reduction_factor to 103) Lower max_workers_final_munging to 14) Lower max_workers_final_base_models to 15) Lower max_cores to, e.g., 1/2 or 1/4 of physical cores." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max workers final base models", + "output": "max workers final base models config.toml: Max. workers for final model building: Maximum parallel workers for final model building.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer or model uses more than the expected amount of memory.Ways to reduce final model building memory usage, e.g. set one or more of these and retrain final model:1) Increase munging_memory_overhead_factor to 102) Increase final_munging_memory_reduction_factor to 103) Lower max_workers_final_munging to 14) Lower max_workers_final_base_models to 15) Lower max_cores to, e.g., 1/2 or 1/4 of physical cores." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. workers for final model building: ", + "output": "max workers final base models config.toml: Max. 
workers for final model building: Maximum parallel workers for final model building.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer or model uses more than the expected amount of memory.Ways to reduce final model building memory usage, e.g. set one or more of these and retrain final model:1) Increase munging_memory_overhead_factor to 102) Increase final_munging_memory_reduction_factor to 103) Lower max_workers_final_munging to 14) Lower max_workers_final_base_models to 15) Lower max_cores to, e.g., 1/2 or 1/4 of physical cores." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_workers_final_base_models", + "output": "max workers final base models config.toml: Maximum parallel workers for final model building.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer or model uses more than the expected amount of memory.Ways to reduce final model building memory usage, e.g. set one or more of these and retrain final model:1) Increase munging_memory_overhead_factor to 102) Increase final_munging_memory_reduction_factor to 103) Lower max_workers_final_munging to 14) Lower max_workers_final_base_models to 15) Lower max_cores to, e.g., 1/2 or 1/4 of physical cores." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_workers_final_base_models", + "output": "max workers final base models config.toml: Max. workers for final model building: Maximum parallel workers for final model building.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer or model uses more than the expected amount of memory.Ways to reduce final model building memory usage, e.g. 
set one or more of these and retrain final model:1) Increase munging_memory_overhead_factor to 102) Increase final_munging_memory_reduction_factor to 103) Lower max_workers_final_munging to 14) Lower max_workers_final_base_models to 15) Lower max_cores to, e.g., 1/2 or 1/4 of physical cores." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_workers_final_munging do? : max workers final munging config.toml: Maximum parallel workers for final per-model munging.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer uses more than the expected amount of memory." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_workers_final_munging. : max workers final munging config.toml: Maximum parallel workers for final per-model munging.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer uses more than the expected amount of memory." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. workers for final per-model munging: . : Set the max workers final munging config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_workers_final_munging", + "output": "max workers final munging config.toml: Maximum parallel workers for final per-model munging.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer uses more than the expected amount of memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_workers_final_munging", + "output": "max workers final munging config.toml: Max. 
workers for final per-model munging: Maximum parallel workers for final per-model munging.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer uses more than the expected amount of memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max workers final munging", + "output": "max workers final munging config.toml: Max. workers for final per-model munging: Maximum parallel workers for final per-model munging.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer uses more than the expected amount of memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. workers for final per-model munging: ", + "output": "max workers final munging config.toml: Max. workers for final per-model munging: Maximum parallel workers for final per-model munging.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer uses more than the expected amount of memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_workers_final_munging", + "output": "max workers final munging config.toml: Maximum parallel workers for final per-model munging.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer uses more than the expected amount of memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_workers_final_munging", + "output": "max workers final munging config.toml: Max. 
workers for final per-model munging: Maximum parallel workers for final per-model munging.0 means automatic, >=1 means limit to no more than that number of parallel jobs.Can be required if some transformer uses more than the expected amount of memory." + }, + { + "prompt_type": "plain", + "instruction": ": What does min_dt_threads_munging do? : min dt threads munging config.toml: Minimum number of threads for datatable (and OpenMP) during data munging (per process).datatable is the main data munging tool used within Driverless ai (source :https://github.com/h2oai/datatable) " + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_dt_threads_munging. : min dt threads munging config.toml: Minimum number of threads for datatable (and OpenMP) during data munging (per process).datatable is the main data munging tool used within Driverless ai (source :https://github.com/h2oai/datatable) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_dt_threads_munging", + "output": "min dt threads munging config.toml: Minimum number of threads for datatable (and OpenMP) during data munging (per process).datatable is the main data munging tool used within Driverless ai (source :https://github.com/h2oai/datatable) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_dt_threads_munging", + "output": "min dt threads munging config.toml: Minimum number of threads for datatable (and OpenMP) during data munging (per process).datatable is the main data munging tool used within Driverless ai (source :https://github.com/h2oai/datatable) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min dt threads munging", + "output": "min dt threads munging config.toml: Minimum number of threads for datatable (and OpenMP) during data munging (per 
process).datatable is the main data munging tool used within Driverless ai (source :https://github.com/h2oai/datatable) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "min dt threads munging config.toml: Minimum number of threads for datatable (and OpenMP) during data munging (per process).datatable is the main data munging tool used within Driverless ai (source :https://github.com/h2oai/datatable) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_dt_threads_munging", + "output": "min dt threads munging config.toml: Minimum number of threads for datatable (and OpenMP) during data munging (per process).datatable is the main data munging tool used within Driverless ai (source :https://github.com/h2oai/datatable) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_dt_threads_munging", + "output": "min dt threads munging config.toml: Minimum number of threads for datatable (and OpenMP) during data munging (per process).datatable is the main data munging tool used within Driverless ai (source :https://github.com/h2oai/datatable) " + }, + { + "prompt_type": "plain", + "instruction": ": What does min_dt_threads_final_munging do? : min dt threads final munging config.toml: Like min_dt_threads_munging but for final pipeline munging" + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_dt_threads_final_munging. 
: min dt threads final munging config.toml: Like min_dt_threads_munging but for final pipeline munging" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_dt_threads_final_munging", + "output": "min dt threads final munging config.toml: Like min_dt_threads_munging but for final pipeline munging" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_dt_threads_final_munging", + "output": "min dt threads final munging config.toml: Like min_dt_threads_munging but for final pipeline munging" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min dt threads final munging", + "output": "min dt threads final munging config.toml: Like min_dt_threads_munging but for final pipeline munging" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "min dt threads final munging config.toml: Like min_dt_threads_munging but for final pipeline munging" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_dt_threads_final_munging", + "output": "min dt threads final munging config.toml: Like min_dt_threads_munging but for final pipeline munging" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_dt_threads_final_munging", + "output": "min dt threads final munging config.toml: Like min_dt_threads_munging but for final pipeline munging" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_dt_threads_munging do? 
: max dt threads munging config.toml: Maximum number of threads for datatable during data munging (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_dt_threads_munging. : max dt threads munging config.toml: Maximum number of threads for datatable during data munging (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. Num. of threads to use for datatable and openblas for munging and model training (0 = all, -1 = auto): . : Set the max dt threads munging config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_dt_threads_munging", + "output": "max dt threads munging config.toml: Maximum number of threads for datatable during data munging (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_dt_threads_munging", + "output": "max dt threads munging config.toml: Max. Num. of threads to use for datatable and openblas for munging and model training (0 = all, -1 = auto): Maximum number of threads for datatable during data munging (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max dt threads munging", + "output": "max dt threads munging config.toml: Max. Num. of threads to use for datatable and openblas for munging and model training (0 = all, -1 = auto): Maximum number of threads for datatable during data munging (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. Num. of threads to use for datatable and openblas for munging and model training (0 = all, -1 = auto): ", + "output": "max dt threads munging config.toml: Max. Num. of threads to use for datatable and openblas for munging and model training (0 = all, -1 = auto): Maximum number of threads for datatable during data munging (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_dt_threads_munging", + "output": "max dt threads munging config.toml: Maximum number of threads for datatable during data munging (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_dt_threads_munging", + "output": "max dt threads munging config.toml: Max. Num. of threads to use for datatable and openblas for munging and model training (0 = all, -1 = auto): Maximum number of threads for datatable during data munging (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_dt_threads_readwrite do? : max dt threads readwrite config.toml: Maximum number of threads for datatable during data reading and writing (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_dt_threads_readwrite. : max dt threads readwrite config.toml: Maximum number of threads for datatable during data reading and writing (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. 
Num. of threads to use for datatable read and write of files (0 = all, -1 = auto): . : Set the max dt threads readwrite config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_dt_threads_readwrite", + "output": "max dt threads readwrite config.toml: Maximum number of threads for datatable during data reading and writing (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_dt_threads_readwrite", + "output": "max dt threads readwrite config.toml: Max. Num. of threads to use for datatable read and write of files (0 = all, -1 = auto): Maximum number of threads for datatable during data reading and writing (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max dt threads readwrite", + "output": "max dt threads readwrite config.toml: Max. Num. of threads to use for datatable read and write of files (0 = all, -1 = auto): Maximum number of threads for datatable during data reading and writing (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. Num. of threads to use for datatable read and write of files (0 = all, -1 = auto): ", + "output": "max dt threads readwrite config.toml: Max. Num. of threads to use for datatable read and write of files (0 = all, -1 = auto): Maximum number of threads for datatable during data reading and writing (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_dt_threads_readwrite", + "output": "max dt threads readwrite config.toml: Maximum number of threads for datatable during data reading and writing (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_dt_threads_readwrite", + "output": "max dt threads readwrite config.toml: Max. Num. of threads to use for datatable read and write of files (0 = all, -1 = auto): Maximum number of threads for datatable during data reading and writing (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_dt_threads_stats_openblas do? : max dt threads stats openblas config.toml: Maximum number of threads for datatable stats and openblas (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_dt_threads_stats_openblas. : max dt threads stats openblas config.toml: Maximum number of threads for datatable stats and openblas (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. Num. of threads to use for datatable stats and openblas (0 = all, -1 = auto): . : Set the max dt threads stats openblas config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_dt_threads_stats_openblas", + "output": "max dt threads stats openblas config.toml: Maximum number of threads for datatable stats and openblas (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_dt_threads_stats_openblas", + "output": "max dt threads stats openblas config.toml: Max. Num. of threads to use for datatable stats and openblas (0 = all, -1 = auto): Maximum number of threads for datatable stats and openblas (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max dt threads stats openblas", + "output": "max dt threads stats openblas config.toml: Max. Num. of threads to use for datatable stats and openblas (0 = all, -1 = auto): Maximum number of threads for datatable stats and openblas (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. Num. of threads to use for datatable stats and openblas (0 = all, -1 = auto): ", + "output": "max dt threads stats openblas config.toml: Max. Num. of threads to use for datatable stats and openblas (0 = all, -1 = auto): Maximum number of threads for datatable stats and openblas (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_dt_threads_stats_openblas", + "output": "max dt threads stats openblas config.toml: Maximum number of threads for datatable stats and openblas (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_dt_threads_stats_openblas", + "output": "max dt threads stats openblas config.toml: Max. Num. 
of threads to use for datatable stats and openblas (0 = all, -1 = auto): Maximum number of threads for datatable stats and openblas (per process) (0 = all, -1 = auto).If multiple forks, threads are distributed across forks." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_dt_threads_do_timeseries_split_suggestion do? : max dt threads do timeseries split suggestion config.toml: Maximum number of threads for datatable during TS properties preview panel computations." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_dt_threads_do_timeseries_split_suggestion. : max dt threads do timeseries split suggestion config.toml: Maximum number of threads for datatable during TS properties preview panel computations." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_dt_threads_do_timeseries_split_suggestion", + "output": "max dt threads do timeseries split suggestion config.toml: Maximum number of threads for datatable during TS properties preview panel computations." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_dt_threads_do_timeseries_split_suggestion", + "output": "max dt threads do timeseries split suggestion config.toml: Maximum number of threads for datatable during TS properties preview panel computations." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max dt threads do timeseries split suggestion", + "output": "max dt threads do timeseries split suggestion config.toml: Maximum number of threads for datatable during TS properties preview panel computations." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max dt threads do timeseries split suggestion config.toml: Maximum number of threads for datatable during TS properties preview panel computations." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_dt_threads_do_timeseries_split_suggestion", + "output": "max dt threads do timeseries split suggestion config.toml: Maximum number of threads for datatable during TS properties preview panel computations." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_dt_threads_do_timeseries_split_suggestion", + "output": "max dt threads do timeseries split suggestion config.toml: Maximum number of threads for datatable during TS properties preview panel computations." + }, + { + "prompt_type": "plain", + "instruction": ": What does num_gpus_per_experiment do? : num gpus per experiment config.toml: Number of GPUs to use per experiment for training task. Set to -1 for all GPUs.An experiment will generate many different models.Currently num_gpus_per_experiment!=-1 disables GPU locking, so is only recommended forsingle experiments and single users.Ignored if GPUs disabled or no GPUs on system.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using dask, this refers to the per-node value.For ImageAutoModel, this refers to the total number of GPUs used for that entire model type,since there is only one model type for the entire experiment.E.g. if have 4 GPUs and want 2 ImageAuto experiments to run on 2 GPUs each, can setnum_gpus_per_experiment to 2 for each experiment, and each of the 4 GPUs will be used one at a timeby the 2 experiments each using 2 GPUs only. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_gpus_per_experiment. 
: num gpus per experiment config.toml: Number of GPUs to use per experiment for training task. Set to -1 for all GPUs.An experiment will generate many different models.Currently num_gpus_per_experiment!=-1 disables GPU locking, so is only recommended forsingle experiments and single users.Ignored if GPUs disabled or no GPUs on system.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using dask, this refers to the per-node value.For ImageAutoModel, this refers to the total number of GPUs used for that entire model type,since there is only one model type for the entire experiment.E.g. if have 4 GPUs and want 2 ImageAuto experiments to run on 2 GPUs each, can setnum_gpus_per_experiment to 2 for each experiment, and each of the 4 GPUs will be used one at a timeby the 2 experiments each using 2 GPUs only. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: #GPUs/Experiment (-1 = autodetect or all): . : Set the num gpus per experiment config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_gpus_per_experiment", + "output": "num gpus per experiment config.toml: Number of GPUs to use per experiment for training task. Set to -1 for all GPUs.An experiment will generate many different models.Currently num_gpus_per_experiment!=-1 disables GPU locking, so is only recommended forsingle experiments and single users.Ignored if GPUs disabled or no GPUs on system.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using dask, this refers to the per-node value.For ImageAutoModel, this refers to the total number of GPUs used for that entire model type,since there is only one model type for the entire experiment.E.g. 
if have 4 GPUs and want 2 ImageAuto experiments to run on 2 GPUs each, can setnum_gpus_per_experiment to 2 for each experiment, and each of the 4 GPUs will be used one at a timeby the 2 experiments each using 2 GPUs only. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_gpus_per_experiment", + "output": "num gpus per experiment config.toml: #GPUs/Experiment (-1 = autodetect or all): Number of GPUs to use per experiment for training task. Set to -1 for all GPUs.An experiment will generate many different models.Currently num_gpus_per_experiment!=-1 disables GPU locking, so is only recommended forsingle experiments and single users.Ignored if GPUs disabled or no GPUs on system.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using dask, this refers to the per-node value.For ImageAutoModel, this refers to the total number of GPUs used for that entire model type,since there is only one model type for the entire experiment.E.g. if have 4 GPUs and want 2 ImageAuto experiments to run on 2 GPUs each, can setnum_gpus_per_experiment to 2 for each experiment, and each of the 4 GPUs will be used one at a timeby the 2 experiments each using 2 GPUs only. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num gpus per experiment", + "output": "num gpus per experiment config.toml: #GPUs/Experiment (-1 = autodetect or all): Number of GPUs to use per experiment for training task. 
Set to -1 for all GPUs.An experiment will generate many different models.Currently num_gpus_per_experiment!=-1 disables GPU locking, so is only recommended forsingle experiments and single users.Ignored if GPUs disabled or no GPUs on system.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using dask, this refers to the per-node value.For ImageAutoModel, this refers to the total number of GPUs used for that entire model type,since there is only one model type for the entire experiment.E.g. if have 4 GPUs and want 2 ImageAuto experiments to run on 2 GPUs each, can setnum_gpus_per_experiment to 2 for each experiment, and each of the 4 GPUs will be used one at a timeby the 2 experiments each using 2 GPUs only. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "#GPUs/Experiment (-1 = autodetect or all): ", + "output": "num gpus per experiment config.toml: #GPUs/Experiment (-1 = autodetect or all): Number of GPUs to use per experiment for training task. Set to -1 for all GPUs.An experiment will generate many different models.Currently num_gpus_per_experiment!=-1 disables GPU locking, so is only recommended forsingle experiments and single users.Ignored if GPUs disabled or no GPUs on system.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using dask, this refers to the per-node value.For ImageAutoModel, this refers to the total number of GPUs used for that entire model type,since there is only one model type for the entire experiment.E.g. if have 4 GPUs and want 2 ImageAuto experiments to run on 2 GPUs each, can setnum_gpus_per_experiment to 2 for each experiment, and each of the 4 GPUs will be used one at a timeby the 2 experiments each using 2 GPUs only. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_gpus_per_experiment", + "output": "num gpus per experiment config.toml: Number of GPUs to use per experiment for training task. Set to -1 for all GPUs.An experiment will generate many different models.Currently num_gpus_per_experiment!=-1 disables GPU locking, so is only recommended forsingle experiments and single users.Ignored if GPUs disabled or no GPUs on system.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using dask, this refers to the per-node value.For ImageAutoModel, this refers to the total number of GPUs used for that entire model type,since there is only one model type for the entire experiment.E.g. if have 4 GPUs and want 2 ImageAuto experiments to run on 2 GPUs each, can setnum_gpus_per_experiment to 2 for each experiment, and each of the 4 GPUs will be used one at a timeby the 2 experiments each using 2 GPUs only. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_gpus_per_experiment", + "output": "num gpus per experiment config.toml: #GPUs/Experiment (-1 = autodetect or all): Number of GPUs to use per experiment for training task. Set to -1 for all GPUs.An experiment will generate many different models.Currently num_gpus_per_experiment!=-1 disables GPU locking, so is only recommended forsingle experiments and single users.Ignored if GPUs disabled or no GPUs on system.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using dask, this refers to the per-node value.For ImageAutoModel, this refers to the total number of GPUs used for that entire model type,since there is only one model type for the entire experiment.E.g. 
if have 4 GPUs and want 2 ImageAuto experiments to run on 2 GPUs each, can setnum_gpus_per_experiment to 2 for each experiment, and each of the 4 GPUs will be used one at a timeby the 2 experiments each using 2 GPUs only. " + }, + { + "prompt_type": "plain", + "instruction": ": What does min_num_cores_per_gpu do? : min num cores per gpu config.toml: Number of CPU cores per GPU. Limits number of GPUs in order to have sufficient cores per GPU. Set to -1 to disable." + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_num_cores_per_gpu. : min num cores per gpu config.toml: Number of CPU cores per GPU. Limits number of GPUs in order to have sufficient cores per GPU. Set to -1 to disable." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Num Cores/GPU: . : Set the min num cores per gpu config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_num_cores_per_gpu", + "output": "min num cores per gpu config.toml: Number of CPU cores per GPU. Limits number of GPUs in order to have sufficient cores per GPU. Set to -1 to disable." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_num_cores_per_gpu", + "output": "min num cores per gpu config.toml: Num Cores/GPU: Number of CPU cores per GPU. Limits number of GPUs in order to have sufficient cores per GPU. Set to -1 to disable." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min num cores per gpu", + "output": "min num cores per gpu config.toml: Num Cores/GPU: Number of CPU cores per GPU. Limits number of GPUs in order to have sufficient cores per GPU. Set to -1 to disable." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Num Cores/GPU: ", + "output": "min num cores per gpu config.toml: Num Cores/GPU: Number of CPU cores per GPU. Limits number of GPUs in order to have sufficient cores per GPU. Set to -1 to disable." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_num_cores_per_gpu", + "output": "min num cores per gpu config.toml: Number of CPU cores per GPU. Limits number of GPUs in order to have sufficient cores per GPU. Set to -1 to disable." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_num_cores_per_gpu", + "output": "min num cores per gpu config.toml: Num Cores/GPU: Number of CPU cores per GPU. Limits number of GPUs in order to have sufficient cores per GPU. Set to -1 to disable." + }, + { + "prompt_type": "plain", + "instruction": ": What does num_gpus_per_model do? : num gpus per model config.toml: Number of GPUs to use per model training task. Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model.Only applicable currently to image auto pipeline building recipe or Dask models with more than one GPU or more than one node.Ignored if GPUs disabled or no GPUs on system.For ImageAutoModel, the maximum of num_gpus_per_model and num_gpus_per_experiment (all GPUs if -1) is taken.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_gpus_per_model. : num gpus per model config.toml: Number of GPUs to use per model training task. 
Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model.Only applicable currently to image auto pipeline building recipe or Dask models with more than one GPU or more than one node.Ignored if GPUs disabled or no GPUs on system.For ImageAutoModel, the maximum of num_gpus_per_model and num_gpus_per_experiment (all GPUs if -1) is taken.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: #GPUs/Model (-1 = all): . : Set the num gpus per model config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_gpus_per_model", + "output": "num gpus per model config.toml: Number of GPUs to use per model training task. Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model.Only applicable currently to image auto pipeline building recipe or Dask models with more than one GPU or more than one node.Ignored if GPUs disabled or no GPUs on system.For ImageAutoModel, the maximum of num_gpus_per_model and num_gpus_per_experiment (all GPUs if -1) is taken.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_gpus_per_model", + "output": "num gpus per model config.toml: #GPUs/Model (-1 = all): Number of GPUs to use per model training task. 
Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model.Only applicable currently to image auto pipeline building recipe or Dask models with more than one GPU or more than one node.Ignored if GPUs disabled or no GPUs on system.For ImageAutoModel, the maximum of num_gpus_per_model and num_gpus_per_experiment (all GPUs if -1) is taken.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num gpus per model", + "output": "num gpus per model config.toml: #GPUs/Model (-1 = all): Number of GPUs to use per model training task. Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model.Only applicable currently to image auto pipeline building recipe or Dask models with more than one GPU or more than one node.Ignored if GPUs disabled or no GPUs on system.For ImageAutoModel, the maximum of num_gpus_per_model and num_gpus_per_experiment (all GPUs if -1) is taken.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "#GPUs/Model (-1 = all): ", + "output": "num gpus per model config.toml: #GPUs/Model (-1 = all): Number of GPUs to use per model training task. 
Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model.Only applicable currently to image auto pipeline building recipe or Dask models with more than one GPU or more than one node.Ignored if GPUs disabled or no GPUs on system.For ImageAutoModel, the maximum of num_gpus_per_model and num_gpus_per_experiment (all GPUs if -1) is taken.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_gpus_per_model", + "output": "num gpus per model config.toml: Number of GPUs to use per model training task. Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model.Only applicable currently to image auto pipeline building recipe or Dask models with more than one GPU or more than one node.Ignored if GPUs disabled or no GPUs on system.For ImageAutoModel, the maximum of num_gpus_per_model and num_gpus_per_experiment (all GPUs if -1) is taken.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_gpus_per_model", + "output": "num gpus per model config.toml: #GPUs/Model (-1 = all): Number of GPUs to use per model training task. 
Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model.Only applicable currently to image auto pipeline building recipe or Dask models with more than one GPU or more than one node.Ignored if GPUs disabled or no GPUs on system.For ImageAutoModel, the maximum of num_gpus_per_model and num_gpus_per_experiment (all GPUs if -1) is taken.More info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationIn multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "plain", + "instruction": ": What does num_gpus_for_prediction do? : num gpus for prediction config.toml: Number of GPUs to use for predict for models and transform for transformers when running outside of fit/fit_transform.-1 means all, 0 means no GPUs, >1 means that many GPUs up to visible limit.If predict/transform are called in same process as fit/fit_transform, number of GPUs will match,while new processes will use this count for number of GPUs for applicable models/transformers.Exception: TensorFlow, PyTorch models/transformers, and RAPIDS predict on GPU always if GPUs exist.RAPIDS requires python scoring package be used also on GPUs.In multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_gpus_for_prediction. 
: num gpus for prediction config.toml: Number of GPUs to use for predict for models and transform for transformers when running outside of fit/fit_transform.-1 means all, 0 means no GPUs, >1 means that many GPUs up to visible limit.If predict/transform are called in same process as fit/fit_transform, number of GPUs will match,while new processes will use this count for number of GPUs for applicable models/transformers.Exception: TensorFlow, PyTorch models/transformers, and RAPIDS predict on GPU always if GPUs exist.RAPIDS requires python scoring package be used also on GPUs.In multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Num. of GPUs for isolated prediction/transform: . : Set the num gpus for prediction config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_gpus_for_prediction", + "output": "num gpus for prediction config.toml: Number of GPUs to use for predict for models and transform for transformers when running outside of fit/fit_transform.-1 means all, 0 means no GPUs, >1 means that many GPUs up to visible limit.If predict/transform are called in same process as fit/fit_transform, number of GPUs will match,while new processes will use this count for number of GPUs for applicable models/transformers.Exception: TensorFlow, PyTorch models/transformers, and RAPIDS predict on GPU always if GPUs exist.RAPIDS requires python scoring package be used also on GPUs.In multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_gpus_for_prediction", + "output": "num gpus for prediction config.toml: Num. 
of GPUs for isolated prediction/transform: Number of GPUs to use for predict for models and transform for transformers when running outside of fit/fit_transform.-1 means all, 0 means no GPUs, >1 means that many GPUs up to visible limit.If predict/transform are called in same process as fit/fit_transform, number of GPUs will match,while new processes will use this count for number of GPUs for applicable models/transformers.Exception: TensorFlow, PyTorch models/transformers, and RAPIDS predict on GPU always if GPUs exist.RAPIDS requires python scoring package be used also on GPUs.In multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num gpus for prediction", + "output": "num gpus for prediction config.toml: Num. of GPUs for isolated prediction/transform: Number of GPUs to use for predict for models and transform for transformers when running outside of fit/fit_transform.-1 means all, 0 means no GPUs, >1 means that many GPUs up to visible limit.If predict/transform are called in same process as fit/fit_transform, number of GPUs will match,while new processes will use this count for number of GPUs for applicable models/transformers.Exception: TensorFlow, PyTorch models/transformers, and RAPIDS predict on GPU always if GPUs exist.RAPIDS requires python scoring package be used also on GPUs.In multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Num. of GPUs for isolated prediction/transform: ", + "output": "num gpus for prediction config.toml: Num. 
of GPUs for isolated prediction/transform: Number of GPUs to use for predict for models and transform for transformers when running outside of fit/fit_transform.-1 means all, 0 means no GPUs, >1 means that many GPUs up to visible limit.If predict/transform are called in same process as fit/fit_transform, number of GPUs will match,while new processes will use this count for number of GPUs for applicable models/transformers.Exception: TensorFlow, PyTorch models/transformers, and RAPIDS predict on GPU always if GPUs exist.RAPIDS requires python scoring package be used also on GPUs.In multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_gpus_for_prediction", + "output": "num gpus for prediction config.toml: Number of GPUs to use for predict for models and transform for transformers when running outside of fit/fit_transform.-1 means all, 0 means no GPUs, >1 means that many GPUs up to visible limit.If predict/transform are called in same process as fit/fit_transform, number of GPUs will match,while new processes will use this count for number of GPUs for applicable models/transformers.Exception: TensorFlow, PyTorch models/transformers, and RAPIDS predict on GPU always if GPUs exist.RAPIDS requires python scoring package be used also on GPUs.In multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_gpus_for_prediction", + "output": "num gpus for prediction config.toml: Num. 
of GPUs for isolated prediction/transform: Number of GPUs to use for predict for models and transform for transformers when running outside of fit/fit_transform.-1 means all, 0 means no GPUs, >1 means that many GPUs up to visible limit.If predict/transform are called in same process as fit/fit_transform, number of GPUs will match,while new processes will use this count for number of GPUs for applicable models/transformers.Exception: TensorFlow, PyTorch models/transformers, and RAPIDS predict on GPU always if GPUs exist.RAPIDS requires python scoring package be used also on GPUs.In multinode context when using Dask, this refers to the per-node value. " + }, + { + "prompt_type": "plain", + "instruction": ": What does gpu_id_start do? : gpu id start config.toml: Which gpu_id to start with-1 : auto-mode. E.g. 2 experiments can each set num_gpus_per_experiment to 2 and use 4 GPUsIf using CUDA_VISIBLE_DEVICES=... to control GPUs (preferred method), gpu_id=0 is thefirst in that restricted list of devices.E.g. if CUDA_VISIBLE_DEVICES='4,5' then gpu_id_start=0 will refer to thedevice #4.E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 2 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=1E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 8 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=4E.g. 
Like just above, but now run on all 4 GPUs/modelExperiment#1: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=4If num_gpus_per_model!=1, global GPU locking is disabled(because underlying algorithms don't support arbitrary gpu ids, only sequential ids),so must setup above correctly to avoid overlap across all experiments by all usersMore info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationNote that GPU selection does not wrap, so gpu_id_start + num_gpus_per_model must be less than number of visibile GPUs " + }, + { + "prompt_type": "plain", + "instruction": ": Explain gpu_id_start. : gpu id start config.toml: Which gpu_id to start with-1 : auto-mode. E.g. 2 experiments can each set num_gpus_per_experiment to 2 and use 4 GPUsIf using CUDA_VISIBLE_DEVICES=... to control GPUs (preferred method), gpu_id=0 is thefirst in that restricted list of devices.E.g. if CUDA_VISIBLE_DEVICES='4,5' then gpu_id_start=0 will refer to thedevice #4.E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 2 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=1E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 8 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=4E.g. 
Like just above, but now run on all 4 GPUs/modelExperiment#1: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=4If num_gpus_per_model!=1, global GPU locking is disabled(because underlying algorithms don't support arbitrary gpu ids, only sequential ids),so must setup above correctly to avoid overlap across all experiments by all usersMore info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationNote that GPU selection does not wrap, so gpu_id_start + num_gpus_per_model must be less than number of visibile GPUs " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: GPU starting ID (0..visible #GPUs - 1): . : Set the gpu id start config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu_id_start", + "output": "gpu id start config.toml: Which gpu_id to start with-1 : auto-mode. E.g. 2 experiments can each set num_gpus_per_experiment to 2 and use 4 GPUsIf using CUDA_VISIBLE_DEVICES=... to control GPUs (preferred method), gpu_id=0 is thefirst in that restricted list of devices.E.g. if CUDA_VISIBLE_DEVICES='4,5' then gpu_id_start=0 will refer to thedevice #4.E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 2 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=1E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 8 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=4E.g. 
Like just above, but now run on all 4 GPUs/modelExperiment#1: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=4If num_gpus_per_model!=1, global GPU locking is disabled(because underlying algorithms don't support arbitrary gpu ids, only sequential ids),so must setup above correctly to avoid overlap across all experiments by all usersMore info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationNote that GPU selection does not wrap, so gpu_id_start + num_gpus_per_model must be less than number of visibile GPUs " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu_id_start", + "output": "gpu id start config.toml: GPU starting ID (0..visible #GPUs - 1): Which gpu_id to start with-1 : auto-mode. E.g. 2 experiments can each set num_gpus_per_experiment to 2 and use 4 GPUsIf using CUDA_VISIBLE_DEVICES=... to control GPUs (preferred method), gpu_id=0 is thefirst in that restricted list of devices.E.g. if CUDA_VISIBLE_DEVICES='4,5' then gpu_id_start=0 will refer to thedevice #4.E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 2 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=1E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 8 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=4E.g. 
Like just above, but now run on all 4 GPUs/modelExperiment#1: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=4If num_gpus_per_model!=1, global GPU locking is disabled(because underlying algorithms don't support arbitrary gpu ids, only sequential ids),so must setup above correctly to avoid overlap across all experiments by all usersMore info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationNote that GPU selection does not wrap, so gpu_id_start + num_gpus_per_model must be less than number of visibile GPUs " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu id start", + "output": "gpu id start config.toml: GPU starting ID (0..visible #GPUs - 1): Which gpu_id to start with-1 : auto-mode. E.g. 2 experiments can each set num_gpus_per_experiment to 2 and use 4 GPUsIf using CUDA_VISIBLE_DEVICES=... to control GPUs (preferred method), gpu_id=0 is thefirst in that restricted list of devices.E.g. if CUDA_VISIBLE_DEVICES='4,5' then gpu_id_start=0 will refer to thedevice #4.E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 2 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=1E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 8 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=4E.g. 
Like just above, but now run on all 4 GPUs/modelExperiment#1: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=4If num_gpus_per_model!=1, global GPU locking is disabled(because underlying algorithms don't support arbitrary gpu ids, only sequential ids),so must setup above correctly to avoid overlap across all experiments by all usersMore info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationNote that GPU selection does not wrap, so gpu_id_start + num_gpus_per_model must be less than number of visibile GPUs " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "GPU starting ID (0..visible #GPUs - 1): ", + "output": "gpu id start config.toml: GPU starting ID (0..visible #GPUs - 1): Which gpu_id to start with-1 : auto-mode. E.g. 2 experiments can each set num_gpus_per_experiment to 2 and use 4 GPUsIf using CUDA_VISIBLE_DEVICES=... to control GPUs (preferred method), gpu_id=0 is thefirst in that restricted list of devices.E.g. if CUDA_VISIBLE_DEVICES='4,5' then gpu_id_start=0 will refer to thedevice #4.E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 2 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=1E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 8 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=4E.g. 
Like just above, but now run on all 4 GPUs/modelExperiment#1: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=4If num_gpus_per_model!=1, global GPU locking is disabled(because underlying algorithms don't support arbitrary gpu ids, only sequential ids),so must setup above correctly to avoid overlap across all experiments by all usersMore info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationNote that GPU selection does not wrap, so gpu_id_start + num_gpus_per_model must be less than number of visibile GPUs " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting gpu_id_start", + "output": "gpu id start config.toml: Which gpu_id to start with-1 : auto-mode. E.g. 2 experiments can each set num_gpus_per_experiment to 2 and use 4 GPUsIf using CUDA_VISIBLE_DEVICES=... to control GPUs (preferred method), gpu_id=0 is thefirst in that restricted list of devices.E.g. if CUDA_VISIBLE_DEVICES='4,5' then gpu_id_start=0 will refer to thedevice #4.E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 2 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=1E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 8 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=4E.g. 
Like just above, but now run on all 4 GPUs/modelExperiment#1: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=4If num_gpus_per_model!=1, global GPU locking is disabled(because underlying algorithms don't support arbitrary gpu ids, only sequential ids),so must setup above correctly to avoid overlap across all experiments by all usersMore info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationNote that GPU selection does not wrap, so gpu_id_start + num_gpus_per_model must be less than number of visibile GPUs " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting gpu_id_start", + "output": "gpu id start config.toml: GPU starting ID (0..visible #GPUs - 1): Which gpu_id to start with-1 : auto-mode. E.g. 2 experiments can each set num_gpus_per_experiment to 2 and use 4 GPUsIf using CUDA_VISIBLE_DEVICES=... to control GPUs (preferred method), gpu_id=0 is thefirst in that restricted list of devices.E.g. if CUDA_VISIBLE_DEVICES='4,5' then gpu_id_start=0 will refer to thedevice #4.E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 2 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=1, gpu_id_start=1E.g. from expert mode, to run 2 experiments, each on a distinct GPU out of 8 GPUs:Experiment#1: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=1, num_gpus_per_experiment=4, gpu_id_start=4E.g. 
Like just above, but now run on all 4 GPUs/modelExperiment#1: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=0Experiment#2: num_gpus_per_model=4, num_gpus_per_experiment=4, gpu_id_start=4If num_gpus_per_model!=1, global GPU locking is disabled(because underlying algorithms don't support arbitrary gpu ids, only sequential ids),so must setup above correctly to avoid overlap across all experiments by all usersMore info at: https://github.com/NVIDIA/nvidia-docker/wiki/nvidia-docker#gpu-isolationNote that GPU selection does not wrap, so gpu_id_start + num_gpus_per_model must be less than number of visibile GPUs " + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_reduce_features_when_failure do? : allow reduce features when failure config.toml: Whether to reduce features until model does not fail.Currently for non-dask XGBoost models (i.e. GLMModel, XGBoostGBMModel, XGBoostDartModel, XGBoostRFModel),during normal fit or when using Optuna.Primarily useful for GPU OOM.If XGBoost runs out of GPU memory, this is detected, and(regardless of setting of skip_model_failures),we perform feature selection using XGBoost on subsets of features.The dataset is progressively reduced by factor of 2 with more models to cover all features.This splitting continues until no failure occurs.Then all sub-models are used to estimate variable importance by absolute information gain,in order to decide which features to include.Finally, a single model with the most important featuresis built using the feature count that did not lead to OOM.For 'auto', this option is set to 'off' when reproducible experiment is enabled,because the condition of running OOM can change for same experiment seed.Reduction is only done on features and not on rows for the feature selection step. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_reduce_features_when_failure. 
: allow reduce features when failure config.toml: Whether to reduce features until model does not fail.Currently for non-dask XGBoost models (i.e. GLMModel, XGBoostGBMModel, XGBoostDartModel, XGBoostRFModel),during normal fit or when using Optuna.Primarily useful for GPU OOM.If XGBoost runs out of GPU memory, this is detected, and(regardless of setting of skip_model_failures),we perform feature selection using XGBoost on subsets of features.The dataset is progressively reduced by factor of 2 with more models to cover all features.This splitting continues until no failure occurs.Then all sub-models are used to estimate variable importance by absolute information gain,in order to decide which features to include.Finally, a single model with the most important featuresis built using the feature count that did not lead to OOM.For 'auto', this option is set to 'off' when reproducible experiment is enabled,because the condition of running OOM can change for same experiment seed.Reduction is only done on features and not on rows for the feature selection step. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to reduce features when model fails: . : Set the allow reduce features when failure config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_reduce_features_when_failure", + "output": "allow reduce features when failure config.toml: Whether to reduce features until model does not fail.Currently for non-dask XGBoost models (i.e. 
GLMModel, XGBoostGBMModel, XGBoostDartModel, XGBoostRFModel),during normal fit or when using Optuna.Primarily useful for GPU OOM.If XGBoost runs out of GPU memory, this is detected, and(regardless of setting of skip_model_failures),we perform feature selection using XGBoost on subsets of features.The dataset is progressively reduced by factor of 2 with more models to cover all features.This splitting continues until no failure occurs.Then all sub-models are used to estimate variable importance by absolute information gain,in order to decide which features to include.Finally, a single model with the most important featuresis built using the feature count that did not lead to OOM.For 'auto', this option is set to 'off' when reproducible experiment is enabled,because the condition of running OOM can change for same experiment seed.Reduction is only done on features and not on rows for the feature selection step. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_reduce_features_when_failure", + "output": "allow reduce features when failure config.toml: Whether to reduce features when model fails: Whether to reduce features until model does not fail.Currently for non-dask XGBoost models (i.e. 
GLMModel, XGBoostGBMModel, XGBoostDartModel, XGBoostRFModel),during normal fit or when using Optuna.Primarily useful for GPU OOM.If XGBoost runs out of GPU memory, this is detected, and(regardless of setting of skip_model_failures),we perform feature selection using XGBoost on subsets of features.The dataset is progressively reduced by factor of 2 with more models to cover all features.This splitting continues until no failure occurs.Then all sub-models are used to estimate variable importance by absolute information gain,in order to decide which features to include.Finally, a single model with the most important featuresis built using the feature count that did not lead to OOM.For 'auto', this option is set to 'off' when reproducible experiment is enabled,because the condition of running OOM can change for same experiment seed.Reduction is only done on features and not on rows for the feature selection step. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow reduce features when failure", + "output": "allow reduce features when failure config.toml: Whether to reduce features when model fails: Whether to reduce features until model does not fail.Currently for non-dask XGBoost models (i.e. 
GLMModel, XGBoostGBMModel, XGBoostDartModel, XGBoostRFModel),during normal fit or when using Optuna.Primarily useful for GPU OOM.If XGBoost runs out of GPU memory, this is detected, and(regardless of setting of skip_model_failures),we perform feature selection using XGBoost on subsets of features.The dataset is progressively reduced by factor of 2 with more models to cover all features.This splitting continues until no failure occurs.Then all sub-models are used to estimate variable importance by absolute information gain,in order to decide which features to include.Finally, a single model with the most important featuresis built using the feature count that did not lead to OOM.For 'auto', this option is set to 'off' when reproducible experiment is enabled,because the condition of running OOM can change for same experiment seed.Reduction is only done on features and not on rows for the feature selection step. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to reduce features when model fails: ", + "output": "allow reduce features when failure config.toml: Whether to reduce features when model fails: Whether to reduce features until model does not fail.Currently for non-dask XGBoost models (i.e. 
GLMModel, XGBoostGBMModel, XGBoostDartModel, XGBoostRFModel),during normal fit or when using Optuna.Primarily useful for GPU OOM.If XGBoost runs out of GPU memory, this is detected, and(regardless of setting of skip_model_failures),we perform feature selection using XGBoost on subsets of features.The dataset is progressively reduced by factor of 2 with more models to cover all features.This splitting continues until no failure occurs.Then all sub-models are used to estimate variable importance by absolute information gain,in order to decide which features to include.Finally, a single model with the most important featuresis built using the feature count that did not lead to OOM.For 'auto', this option is set to 'off' when reproducible experiment is enabled,because the condition of running OOM can change for same experiment seed.Reduction is only done on features and not on rows for the feature selection step. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_reduce_features_when_failure", + "output": "allow reduce features when failure config.toml: Whether to reduce features until model does not fail.Currently for non-dask XGBoost models (i.e. 
GLMModel, XGBoostGBMModel, XGBoostDartModel, XGBoostRFModel),during normal fit or when using Optuna.Primarily useful for GPU OOM.If XGBoost runs out of GPU memory, this is detected, and(regardless of setting of skip_model_failures),we perform feature selection using XGBoost on subsets of features.The dataset is progressively reduced by factor of 2 with more models to cover all features.This splitting continues until no failure occurs.Then all sub-models are used to estimate variable importance by absolute information gain,in order to decide which features to include.Finally, a single model with the most important featuresis built using the feature count that did not lead to OOM.For 'auto', this option is set to 'off' when reproducible experiment is enabled,because the condition of running OOM can change for same experiment seed.Reduction is only done on features and not on rows for the feature selection step. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allow_reduce_features_when_failure", + "output": "allow reduce features when failure config.toml: Whether to reduce features when model fails: Whether to reduce features until model does not fail.Currently for non-dask XGBoost models (i.e. 
GLMModel, XGBoostGBMModel, XGBoostDartModel, XGBoostRFModel),during normal fit or when using Optuna.Primarily useful for GPU OOM.If XGBoost runs out of GPU memory, this is detected, and(regardless of setting of skip_model_failures),we perform feature selection using XGBoost on subsets of features.The dataset is progressively reduced by factor of 2 with more models to cover all features.This splitting continues until no failure occurs.Then all sub-models are used to estimate variable importance by absolute information gain,in order to decide which features to include.Finally, a single model with the most important featuresis built using the feature count that did not lead to OOM.For 'auto', this option is set to 'off' when reproducible experiment is enabled,because the condition of running OOM can change for same experiment seed.Reduction is only done on features and not on rows for the feature selection step. " + }, + { + "prompt_type": "plain", + "instruction": ": What does reduce_repeats_when_failure do? : reduce repeats when failure config.toml: With allow_reduce_features_when_failure, this controls how many repeats of sub-modelsused for feature selection. A single repeat only has each sub-modelconsider a single sub-set of features, while repeats shuffle whichfeatures are considered allowing more chance to find important interactions.More repeats can lead to higher accuracy.The cost of this option is proportional to the repeat count. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain reduce_repeats_when_failure. : reduce repeats when failure config.toml: With allow_reduce_features_when_failure, this controls how many repeats of sub-modelsused for feature selection. A single repeat only has each sub-modelconsider a single sub-set of features, while repeats shuffle whichfeatures are considered allowing more chance to find important interactions.More repeats can lead to higher accuracy.The cost of this option is proportional to the repeat count. 
" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of repeats for models used for feature selection during failure recovery.: . : Set the reduce repeats when failure config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "reduce_repeats_when_failure", + "output": "reduce repeats when failure config.toml: With allow_reduce_features_when_failure, this controls how many repeats of sub-modelsused for feature selection. A single repeat only has each sub-modelconsider a single sub-set of features, while repeats shuffle whichfeatures are considered allowing more chance to find important interactions.More repeats can lead to higher accuracy.The cost of this option is proportional to the repeat count. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "reduce_repeats_when_failure", + "output": "reduce repeats when failure config.toml: Number of repeats for models used for feature selection during failure recovery.: With allow_reduce_features_when_failure, this controls how many repeats of sub-modelsused for feature selection. A single repeat only has each sub-modelconsider a single sub-set of features, while repeats shuffle whichfeatures are considered allowing more chance to find important interactions.More repeats can lead to higher accuracy.The cost of this option is proportional to the repeat count. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "reduce repeats when failure", + "output": "reduce repeats when failure config.toml: Number of repeats for models used for feature selection during failure recovery.: With allow_reduce_features_when_failure, this controls how many repeats of sub-modelsused for feature selection. 
A single repeat only has each sub-modelconsider a single sub-set of features, while repeats shuffle whichfeatures are considered allowing more chance to find important interactions.More repeats can lead to higher accuracy.The cost of this option is proportional to the repeat count. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of repeats for models used for feature selection during failure recovery.: ", + "output": "reduce repeats when failure config.toml: Number of repeats for models used for feature selection during failure recovery.: With allow_reduce_features_when_failure, this controls how many repeats of sub-modelsused for feature selection. A single repeat only has each sub-modelconsider a single sub-set of features, while repeats shuffle whichfeatures are considered allowing more chance to find important interactions.More repeats can lead to higher accuracy.The cost of this option is proportional to the repeat count. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting reduce_repeats_when_failure", + "output": "reduce repeats when failure config.toml: With allow_reduce_features_when_failure, this controls how many repeats of sub-modelsused for feature selection. A single repeat only has each sub-modelconsider a single sub-set of features, while repeats shuffle whichfeatures are considered allowing more chance to find important interactions.More repeats can lead to higher accuracy.The cost of this option is proportional to the repeat count. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting reduce_repeats_when_failure", + "output": "reduce repeats when failure config.toml: Number of repeats for models used for feature selection during failure recovery.: With allow_reduce_features_when_failure, this controls how many repeats of sub-modelsused for feature selection. A single repeat only has each sub-modelconsider a single sub-set of features, while repeats shuffle whichfeatures are considered allowing more chance to find important interactions.More repeats can lead to higher accuracy.The cost of this option is proportional to the repeat count. " + }, + { + "prompt_type": "plain", + "instruction": ": What does fraction_anchor_reduce_features_when_failure do? : fraction anchor reduce features when failure config.toml: With allow_reduce_features_when_failure, this controls the fraction of featurestreated as an anchor that are fixed for all sub-models.Each repeat gets new anchors.For tuning and evolution, the probability dependsupon any prior importance (if present) from other individuals,while final model uses uniform probability for anchor features. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain fraction_anchor_reduce_features_when_failure. : fraction anchor reduce features when failure config.toml: With allow_reduce_features_when_failure, this controls the fraction of featurestreated as an anchor that are fixed for all sub-models.Each repeat gets new anchors.For tuning and evolution, the probability dependsupon any prior importance (if present) from other individuals,while final model uses uniform probability for anchor features. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Fraction of features treated as anchor for feature selection during failure recovery.: . 
: Set the fraction anchor reduce features when failure config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fraction_anchor_reduce_features_when_failure", + "output": "fraction anchor reduce features when failure config.toml: With allow_reduce_features_when_failure, this controls the fraction of featurestreated as an anchor that are fixed for all sub-models.Each repeat gets new anchors.For tuning and evolution, the probability dependsupon any prior importance (if present) from other individuals,while final model uses uniform probability for anchor features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fraction_anchor_reduce_features_when_failure", + "output": "fraction anchor reduce features when failure config.toml: Fraction of features treated as anchor for feature selection during failure recovery.: With allow_reduce_features_when_failure, this controls the fraction of featurestreated as an anchor that are fixed for all sub-models.Each repeat gets new anchors.For tuning and evolution, the probability dependsupon any prior importance (if present) from other individuals,while final model uses uniform probability for anchor features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fraction anchor reduce features when failure", + "output": "fraction anchor reduce features when failure config.toml: Fraction of features treated as anchor for feature selection during failure recovery.: With allow_reduce_features_when_failure, this controls the fraction of featurestreated as an anchor that are fixed for all sub-models.Each repeat gets new anchors.For tuning and evolution, the probability dependsupon any prior importance (if present) from other individuals,while final model uses uniform probability for anchor features. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Fraction of features treated as anchor for feature selection during failure recovery.: ", + "output": "fraction anchor reduce features when failure config.toml: Fraction of features treated as anchor for feature selection during failure recovery.: With allow_reduce_features_when_failure, this controls the fraction of featurestreated as an anchor that are fixed for all sub-models.Each repeat gets new anchors.For tuning and evolution, the probability dependsupon any prior importance (if present) from other individuals,while final model uses uniform probability for anchor features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fraction_anchor_reduce_features_when_failure", + "output": "fraction anchor reduce features when failure config.toml: With allow_reduce_features_when_failure, this controls the fraction of featurestreated as an anchor that are fixed for all sub-models.Each repeat gets new anchors.For tuning and evolution, the probability dependsupon any prior importance (if present) from other individuals,while final model uses uniform probability for anchor features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fraction_anchor_reduce_features_when_failure", + "output": "fraction anchor reduce features when failure config.toml: Fraction of features treated as anchor for feature selection during failure recovery.: With allow_reduce_features_when_failure, this controls the fraction of featurestreated as an anchor that are fixed for all sub-models.Each repeat gets new anchors.For tuning and evolution, the probability dependsupon any prior importance (if present) from other individuals,while final model uses uniform probability for anchor features. 
" + }, + { + "prompt_type": "plain", + "instruction": ": What does xgboost_reduce_on_errors_list do? : xgboost reduce on errors list config.toml: Error strings from XGBoost that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain xgboost_reduce_on_errors_list. : xgboost reduce on errors list config.toml: Error strings from XGBoost that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Errors from XGBoost that trigger reduction of features: . : Set the xgboost reduce on errors list config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "xgboost_reduce_on_errors_list", + "output": "xgboost reduce on errors list config.toml: Error strings from XGBoost that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "xgboost_reduce_on_errors_list", + "output": "xgboost reduce on errors list config.toml: Errors from XGBoost that trigger reduction of features: Error strings from XGBoost that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "xgboost reduce on errors list", + "output": "xgboost reduce on errors list config.toml: Errors from XGBoost that trigger reduction of features: Error strings from XGBoost that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Errors from XGBoost that trigger reduction of features: ", + "output": "xgboost reduce on errors list config.toml: Errors from XGBoost that trigger reduction of features: Error strings from XGBoost that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting xgboost_reduce_on_errors_list", + "output": "xgboost reduce on errors list config.toml: Error strings from XGBoost that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting xgboost_reduce_on_errors_list", + "output": "xgboost reduce on errors list config.toml: Errors from XGBoost that trigger reduction of features: Error strings from XGBoost that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. " + }, + { + "prompt_type": "plain", + "instruction": ": What does lightgbm_reduce_on_errors_list do? : lightgbm reduce on errors list config.toml: Error strings from LightGBM that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain lightgbm_reduce_on_errors_list. : lightgbm reduce on errors list config.toml: Error strings from LightGBM that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Errors from LightGBM that trigger reduction of features: . 
: Set the lightgbm reduce on errors list config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_reduce_on_errors_list", + "output": "lightgbm reduce on errors list config.toml: Error strings from LightGBM that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_reduce_on_errors_list", + "output": "lightgbm reduce on errors list config.toml: Errors from LightGBM that trigger reduction of features: Error strings from LightGBM that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm reduce on errors list", + "output": "lightgbm reduce on errors list config.toml: Errors from LightGBM that trigger reduction of features: Error strings from LightGBM that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Errors from LightGBM that trigger reduction of features: ", + "output": "lightgbm reduce on errors list config.toml: Errors from LightGBM that trigger reduction of features: Error strings from LightGBM that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting lightgbm_reduce_on_errors_list", + "output": "lightgbm reduce on errors list config.toml: Error strings from LightGBM that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting lightgbm_reduce_on_errors_list", + "output": "lightgbm reduce on errors list config.toml: Errors from LightGBM that trigger reduction of features: Error strings from LightGBM that are used to trigger re-fit on reduced sub-models.See allow_reduce_features_when_failure. " + }, + { + "prompt_type": "plain", + "instruction": ": What does lightgbm_use_gpu do? : lightgbm use gpu config.toml: LightGBM does not significantly benefit from GPUs, unlike other tools like XGBoost or Bert/Image Models. Each experiment will try to use all GPUs, and on systems with many cores and GPUs, this leads to many experiments running at once, all trying to lock the GPU for use, leaving the cores heavily under-utilized. So by default, DAI always uses CPU for LightGBM, unless 'on' is specified." + }, + { + "prompt_type": "plain", + "instruction": ": Explain lightgbm_use_gpu. : lightgbm use gpu config.toml: LightGBM does not significantly benefit from GPUs, unlike other tools like XGBoost or Bert/Image Models. Each experiment will try to use all GPUs, and on systems with many cores and GPUs, this leads to many experiments running at once, all trying to lock the GPU for use, leaving the cores heavily under-utilized. So by default, DAI always uses CPU for LightGBM, unless 'on' is specified." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to use GPUs for LightGBM: . : Set the lightgbm use gpu config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_use_gpu", + "output": "lightgbm use gpu config.toml: LightGBM does not significantly benefit from GPUs, unlike other tools like XGBoost or Bert/Image Models. 
Each experiment will try to use all GPUs, and on systems with many cores and GPUs, this leads to many experiments running at once, all trying to lock the GPU for use, leaving the cores heavily under-utilized. So by default, DAI always uses CPU for LightGBM, unless 'on' is specified." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_use_gpu", + "output": "lightgbm use gpu config.toml: Whether to use GPUs for LightGBM: LightGBM does not significantly benefit from GPUs, unlike other tools like XGBoost or Bert/Image Models. Each experiment will try to use all GPUs, and on systems with many cores and GPUs, this leads to many experiments running at once, all trying to lock the GPU for use, leaving the cores heavily under-utilized. So by default, DAI always uses CPU for LightGBM, unless 'on' is specified." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm use gpu", + "output": "lightgbm use gpu config.toml: Whether to use GPUs for LightGBM: LightGBM does not significantly benefit from GPUs, unlike other tools like XGBoost or Bert/Image Models. Each experiment will try to use all GPUs, and on systems with many cores and GPUs, this leads to many experiments running at once, all trying to lock the GPU for use, leaving the cores heavily under-utilized. So by default, DAI always uses CPU for LightGBM, unless 'on' is specified." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to use GPUs for LightGBM: ", + "output": "lightgbm use gpu config.toml: Whether to use GPUs for LightGBM: LightGBM does not significantly benefit from GPUs, unlike other tools like XGBoost or Bert/Image Models. 
Each experiment will try to use all GPUs, and on systems with many cores and GPUs, this leads to many experiments running at once, all trying to lock the GPU for use, leaving the cores heavily under-utilized. So by default, DAI always uses CPU for LightGBM, unless 'on' is specified." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting lightgbm_use_gpu", + "output": "lightgbm use gpu config.toml: LightGBM does not significantly benefit from GPUs, unlike other tools like XGBoost or Bert/Image Models. Each experiment will try to use all GPUs, and on systems with many cores and GPUs, this leads to many experiments running at once, all trying to lock the GPU for use, leaving the cores heavily under-utilized. So by default, DAI always uses CPU for LightGBM, unless 'on' is specified." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting lightgbm_use_gpu", + "output": "lightgbm use gpu config.toml: Whether to use GPUs for LightGBM: LightGBM does not significantly benefit from GPUs, unlike other tools like XGBoost or Bert/Image Models. Each experiment will try to use all GPUs, and on systems with many cores and GPUs, this leads to many experiments running at once, all trying to lock the GPU for use, leaving the cores heavily under-utilized. So by default, DAI always uses CPU for LightGBM, unless 'on' is specified." + }, + { + "prompt_type": "plain", + "instruction": ": What does kaggle_username do? : kaggle username config.toml: Kaggle username for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "plain", + "instruction": ": Explain kaggle_username. 
: kaggle username config.toml: Kaggle username for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Kaggle username: . : Set the kaggle username config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle_username", + "output": "kaggle username config.toml: Kaggle username for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle_username", + "output": "kaggle username config.toml: Kaggle username: Kaggle username for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle username", + "output": "kaggle username config.toml: Kaggle username: Kaggle username for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Kaggle username: ", + "output": "kaggle username config.toml: Kaggle username: Kaggle username for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation 
of the expert setting kaggle_username", + "output": "kaggle username config.toml: Kaggle username for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting kaggle_username", + "output": "kaggle username config.toml: Kaggle username: Kaggle username for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "plain", + "instruction": ": What does kaggle_key do? : kaggle key config.toml: Kaggle key for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "plain", + "instruction": ": Explain kaggle_key. : kaggle key config.toml: Kaggle key for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Kaggle key: . 
: Set the kaggle key config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle_key", + "output": "kaggle key config.toml: Kaggle key for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle_key", + "output": "kaggle key config.toml: Kaggle key: Kaggle key for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle key", + "output": "kaggle key config.toml: Kaggle key: Kaggle key for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Kaggle key: ", + "output": "kaggle key config.toml: Kaggle key: Kaggle key for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting kaggle_key", + "output": "kaggle key config.toml: Kaggle key for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting kaggle_key", + "output": 
"kaggle key config.toml: Kaggle key: Kaggle key for automatic submission and scoring of test set predictions.See https://github.com/Kaggle/kaggle-api#api-credentials for details on how to obtain Kaggle API credentials\", " + }, + { + "prompt_type": "plain", + "instruction": ": What does kaggle_timeout do? : kaggle timeout config.toml: Max. number of seconds to wait for Kaggle API call to return scores for given predictions" + }, + { + "prompt_type": "plain", + "instruction": ": Explain kaggle_timeout. : kaggle timeout config.toml: Max. number of seconds to wait for Kaggle API call to return scores for given predictions" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Kaggle submission timeout in seconds: . : Set the kaggle timeout config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle_timeout", + "output": "kaggle timeout config.toml: Max. number of seconds to wait for Kaggle API call to return scores for given predictions" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle_timeout", + "output": "kaggle timeout config.toml: Kaggle submission timeout in seconds: Max. number of seconds to wait for Kaggle API call to return scores for given predictions" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle timeout", + "output": "kaggle timeout config.toml: Kaggle submission timeout in seconds: Max. number of seconds to wait for Kaggle API call to return scores for given predictions" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Kaggle submission timeout in seconds: ", + "output": "kaggle timeout config.toml: Kaggle submission timeout in seconds: Max. 
number of seconds to wait for Kaggle API call to return scores for given predictions" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting kaggle_timeout", + "output": "kaggle timeout config.toml: Max. number of seconds to wait for Kaggle API call to return scores for given predictions" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting kaggle_timeout", + "output": "kaggle timeout config.toml: Kaggle submission timeout in seconds: Max. number of seconds to wait for Kaggle API call to return scores for given predictions" + }, + { + "prompt_type": "plain", + "instruction": ": What does kaggle_keep_submission do? : kaggle keep submission config.toml: Whether to keep Kaggle submission file in experiment directory: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain kaggle_keep_submission. : kaggle keep submission config.toml: Whether to keep Kaggle submission file in experiment directory: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle_keep_submission", + "output": "kaggle keep submission config.toml: Whether to keep Kaggle submission file in experiment directory: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle_keep_submission", + "output": "kaggle keep submission config.toml: Whether to keep Kaggle submission file in experiment directory: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle keep submission", + "output": "kaggle keep submission config.toml: Whether to keep Kaggle submission file in experiment directory: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to keep Kaggle submission file in 
experiment directory: ", + "output": "kaggle keep submission config.toml: Whether to keep Kaggle submission file in experiment directory: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting kaggle_keep_submission", + "output": "kaggle keep submission config.toml: Whether to keep Kaggle submission file in experiment directory: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting kaggle_keep_submission", + "output": "kaggle keep submission config.toml: Whether to keep Kaggle submission file in experiment directory: " + }, + { + "prompt_type": "plain", + "instruction": ": What does kaggle_competitions do? : kaggle competitions config.toml: If provided, can extend the list to arbitrary and potentially future Kaggle competitions to make submissions for. Only used if kaggle_key and kaggle_username are provided. Provide a quoted comma-separated list of tuples (target column name, number of test rows, competition, metric) like this: kaggle_competitions='(\"target\", 200000, \"santander-customer-transaction-prediction\", \"AUC\"), (\"TARGET\", 75818, \"santander-customer-satisfaction\", \"AUC\")' " + }, + { + "prompt_type": "plain", + "instruction": ": Explain kaggle_competitions. : kaggle competitions config.toml: If provided, can extend the list to arbitrary and potentially future Kaggle competitions to make submissions for. Only used if kaggle_key and kaggle_username are provided. Provide a quoted comma-separated list of tuples (target column name, number of test rows, competition, metric) like this: kaggle_competitions='(\"target\", 200000, \"santander-customer-transaction-prediction\", \"AUC\"), (\"TARGET\", 75818, \"santander-customer-satisfaction\", \"AUC\")' " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Custom Kaggle competitions to make automatic test set submissions for.: . 
: Set the kaggle competitions config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle_competitions", + "output": "kaggle competitions config.toml: If provided, can extend the list to arbitrary and potentially future Kaggle competitions to make submissions for. Only used if kaggle_key and kaggle_username are provided. Provide a quoted comma-separated list of tuples (target column name, number of test rows, competition, metric) like this: kaggle_competitions='(\"target\", 200000, \"santander-customer-transaction-prediction\", \"AUC\"), (\"TARGET\", 75818, \"santander-customer-satisfaction\", \"AUC\")' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle_competitions", + "output": "kaggle competitions config.toml: Custom Kaggle competitions to make automatic test set submissions for.: If provided, can extend the list to arbitrary and potentially future Kaggle competitions to make submissions for. Only used if kaggle_key and kaggle_username are provided. Provide a quoted comma-separated list of tuples (target column name, number of test rows, competition, metric) like this: kaggle_competitions='(\"target\", 200000, \"santander-customer-transaction-prediction\", \"AUC\"), (\"TARGET\", 75818, \"santander-customer-satisfaction\", \"AUC\")' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kaggle competitions", + "output": "kaggle competitions config.toml: Custom Kaggle competitions to make automatic test set submissions for.: If provided, can extend the list to arbitrary and potentially future Kaggle competitions to make submissions for. Only used if kaggle_key and kaggle_username are provided. 
Provide a quoted comma-separated list of tuples (target column name, number of test rows, competition, metric) like this: kaggle_competitions='(\"target\", 200000, \"santander-customer-transaction-prediction\", \"AUC\"), (\"TARGET\", 75818, \"santander-customer-satisfaction\", \"AUC\")' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Custom Kaggle competitions to make automatic test set submissions for.: ", + "output": "kaggle competitions config.toml: Custom Kaggle competitions to make automatic test set submissions for.: If provided, can extend the list to arbitrary and potentially future Kaggle competitions to make submissions for. Only used if kaggle_key and kaggle_username are provided. Provide a quoted comma-separated list of tuples (target column name, number of test rows, competition, metric) like this: kaggle_competitions='(\"target\", 200000, \"santander-customer-transaction-prediction\", \"AUC\"), (\"TARGET\", 75818, \"santander-customer-satisfaction\", \"AUC\")' " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting kaggle_competitions", + "output": "kaggle competitions config.toml: If provided, can extend the list to arbitrary and potentially future Kaggle competitions to make submissions for. Only used if kaggle_key and kaggle_username are provided. 
Provide a quoted comma-separated list of tuples (target column name, number of test rows, competition, metric) like this: kaggle_competitions='(\"target\", 200000, \"santander-customer-transaction-prediction\", \"AUC\"), (\"TARGET\", 75818, \"santander-customer-satisfaction\", \"AUC\")' " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting kaggle_competitions", + "output": "kaggle competitions config.toml: Custom Kaggle competitions to make automatic test set submissions for.: If provided, can extend the list to arbitrary and potentially future Kaggle competitions to make submissions for. Only used if kaggle_key and kaggle_username are provided. Provide a quoted comma-separated list of tuples (target column name, number of test rows, competition, metric) like this: kaggle_competitions='(\"target\", 200000, \"santander-customer-transaction-prediction\", \"AUC\"), (\"TARGET\", 75818, \"santander-customer-satisfaction\", \"AUC\")' " + }, + { + "prompt_type": "plain", + "instruction": ": What does ping_period do? : ping period config.toml: Period (in seconds) of ping by Driverless AI server to each experiment (in order to get logger info like disk space and memory usage). 0 means don't print anything." + }, + { + "prompt_type": "plain", + "instruction": ": Explain ping_period. : ping period config.toml: Period (in seconds) of ping by Driverless AI server to each experiment (in order to get logger info like disk space and memory usage). 0 means don't print anything." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ping_period", + "output": "ping period config.toml: Period (in seconds) of ping by Driverless AI server to each experiment (in order to get logger info like disk space and memory usage). 0 means don't print anything." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ping_period", + "output": "ping period config.toml: Period (in seconds) of ping by Driverless AI server to each experiment (in order to get logger info like disk space and memory usage). 0 means don't print anything." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ping period", + "output": "ping period config.toml: Period (in seconds) of ping by Driverless AI server to each experiment (in order to get logger info like disk space and memory usage). 0 means don't print anything." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ping period config.toml: Period (in seconds) of ping by Driverless AI server to each experiment (in order to get logger info like disk space and memory usage). 0 means don't print anything." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ping_period", + "output": "ping period config.toml: Period (in seconds) of ping by Driverless AI server to each experiment (in order to get logger info like disk space and memory usage). 0 means don't print anything." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ping_period", + "output": "ping period config.toml: Period (in seconds) of ping by Driverless AI server to each experiment (in order to get logger info like disk space and memory usage). 0 means don't print anything." + }, + { + "prompt_type": "plain", + "instruction": ": What does ping_autodl do? : ping autodl config.toml: Whether to enable ping of system status during DAI experiments." + }, + { + "prompt_type": "plain", + "instruction": ": Explain ping_autodl. 
: ping autodl config.toml: Whether to enable ping of system status during DAI experiments." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to enable ping of system status during DAI experiments.: . : Set the ping autodl config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ping_autodl", + "output": "ping autodl config.toml: Whether to enable ping of system status during DAI experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ping_autodl", + "output": "ping autodl config.toml: Whether to enable ping of system status during DAI experiments.: Whether to enable ping of system status during DAI experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ping autodl", + "output": "ping autodl config.toml: Whether to enable ping of system status during DAI experiments.: Whether to enable ping of system status during DAI experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to enable ping of system status during DAI experiments.: ", + "output": "ping autodl config.toml: Whether to enable ping of system status during DAI experiments.: Whether to enable ping of system status during DAI experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ping_autodl", + "output": "ping autodl config.toml: Whether to enable ping of system status during DAI experiments." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ping_autodl", + "output": "ping autodl config.toml: Whether to enable ping of system status during DAI experiments.: Whether to enable ping of system status during DAI experiments." + }, + { + "prompt_type": "plain", + "instruction": ": What does disk_limit_gb do? : disk limit gb config.toml: Minimum amount of disk space in GB needed to run experiments. Experiments will fail if this limit is crossed. This limit exists because Driverless AI needs to generate data for model training feature engineering, documentation and other such processes." + }, + { + "prompt_type": "plain", + "instruction": ": Explain disk_limit_gb. : disk limit gb config.toml: Minimum amount of disk space in GB needed to run experiments. Experiments will fail if this limit is crossed. This limit exists because Driverless AI needs to generate data for model training feature engineering, documentation and other such processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "disk_limit_gb", + "output": "disk limit gb config.toml: Minimum amount of disk space in GB needed to run experiments. Experiments will fail if this limit is crossed. This limit exists because Driverless AI needs to generate data for model training feature engineering, documentation and other such processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "disk_limit_gb", + "output": "disk limit gb config.toml: Minimum amount of disk space in GB needed to run experiments. Experiments will fail if this limit is crossed. This limit exists because Driverless AI needs to generate data for model training feature engineering, documentation and other such processes." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "disk limit gb", + "output": "disk limit gb config.toml: Minimum amount of disk space in GB needed to run experiments. Experiments will fail if this limit is crossed. This limit exists because Driverless AI needs to generate data for model training feature engineering, documentation and other such processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "disk limit gb config.toml: Minimum amount of disk space in GB needed to run experiments. Experiments will fail if this limit is crossed. This limit exists because Driverless AI needs to generate data for model training feature engineering, documentation and other such processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting disk_limit_gb", + "output": "disk limit gb config.toml: Minimum amount of disk space in GB needed to run experiments. Experiments will fail if this limit is crossed. This limit exists because Driverless AI needs to generate data for model training feature engineering, documentation and other such processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting disk_limit_gb", + "output": "disk limit gb config.toml: Minimum amount of disk space in GB needed to run experiments. Experiments will fail if this limit is crossed. This limit exists because Driverless AI needs to generate data for model training feature engineering, documentation and other such processes." + }, + { + "prompt_type": "plain", + "instruction": ": What does stall_disk_limit_gb do? : stall disk limit gb config.toml: Minimum amount of disk space in GB needed to before stall forking of new processes during an experiment." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain stall_disk_limit_gb. : stall disk limit gb config.toml: Minimum amount of disk space in GB needed to before stall forking of new processes during an experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall_disk_limit_gb", + "output": "stall disk limit gb config.toml: Minimum amount of disk space in GB needed to before stall forking of new processes during an experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall_disk_limit_gb", + "output": "stall disk limit gb config.toml: Minimum amount of disk space in GB needed to before stall forking of new processes during an experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall disk limit gb", + "output": "stall disk limit gb config.toml: Minimum amount of disk space in GB needed to before stall forking of new processes during an experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "stall disk limit gb config.toml: Minimum amount of disk space in GB needed to before stall forking of new processes during an experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting stall_disk_limit_gb", + "output": "stall disk limit gb config.toml: Minimum amount of disk space in GB needed to before stall forking of new processes during an experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting stall_disk_limit_gb", + "output": "stall disk limit gb config.toml: Minimum amount of disk space in GB needed to before stall forking of new processes during an experiment." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does memory_limit_gb do? : memory limit gb config.toml: Minimum amount of system memory in GB needed to start experiments. Similarly with disk space, a certain amount of system memory is needed to run some basic operations." + }, + { + "prompt_type": "plain", + "instruction": ": Explain memory_limit_gb. : memory limit gb config.toml: Minimum amount of system memory in GB needed to start experiments. Similarly with disk space, a certain amount of system memory is needed to run some basic operations." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "memory_limit_gb", + "output": "memory limit gb config.toml: Minimum amount of system memory in GB needed to start experiments. Similarly with disk space, a certain amount of system memory is needed to run some basic operations." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "memory_limit_gb", + "output": "memory limit gb config.toml: Minimum amount of system memory in GB needed to start experiments. Similarly with disk space, a certain amount of system memory is needed to run some basic operations." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "memory limit gb", + "output": "memory limit gb config.toml: Minimum amount of system memory in GB needed to start experiments. Similarly with disk space, a certain amount of system memory is needed to run some basic operations." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "memory limit gb config.toml: Minimum amount of system memory in GB needed to start experiments. Similarly with disk space, a certain amount of system memory is needed to run some basic operations." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting memory_limit_gb", + "output": "memory limit gb config.toml: Minimum amount of system memory in GB needed to start experiments. Similarly with disk space, a certain amount of system memory is needed to run some basic operations." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting memory_limit_gb", + "output": "memory limit gb config.toml: Minimum amount of system memory in GB needed to start experiments. Similarly with disk space, a certain amount of system memory is needed to run some basic operations." + }, + { + "prompt_type": "plain", + "instruction": ": What does min_num_rows do? : min num rows config.toml: Minimum number of rows needed to run experiments (values lower than 100 might not work).A minimum threshold is set to ensure there is enough data to create a statisticallyreliable model and avoid other small-data related failures. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_num_rows. : min num rows config.toml: Minimum number of rows needed to run experiments (values lower than 100 might not work).A minimum threshold is set to ensure there is enough data to create a statisticallyreliable model and avoid other small-data related failures. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Min. number of rows needed to run experiment: . : Set the min num rows config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_num_rows", + "output": "min num rows config.toml: Minimum number of rows needed to run experiments (values lower than 100 might not work).A minimum threshold is set to ensure there is enough data to create a statisticallyreliable model and avoid other small-data related failures. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_num_rows", + "output": "min num rows config.toml: Min. number of rows needed to run experiment: Minimum number of rows needed to run experiments (values lower than 100 might not work).A minimum threshold is set to ensure there is enough data to create a statisticallyreliable model and avoid other small-data related failures. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min num rows", + "output": "min num rows config.toml: Min. number of rows needed to run experiment: Minimum number of rows needed to run experiments (values lower than 100 might not work).A minimum threshold is set to ensure there is enough data to create a statisticallyreliable model and avoid other small-data related failures. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Min. number of rows needed to run experiment: ", + "output": "min num rows config.toml: Min. number of rows needed to run experiment: Minimum number of rows needed to run experiments (values lower than 100 might not work).A minimum threshold is set to ensure there is enough data to create a statisticallyreliable model and avoid other small-data related failures. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_num_rows", + "output": "min num rows config.toml: Minimum number of rows needed to run experiments (values lower than 100 might not work).A minimum threshold is set to ensure there is enough data to create a statisticallyreliable model and avoid other small-data related failures. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_num_rows", + "output": "min num rows config.toml: Min. 
number of rows needed to run experiment: Minimum number of rows needed to run experiments (values lower than 100 might not work).A minimum threshold is set to ensure there is enough data to create a statisticallyreliable model and avoid other small-data related failures. " + }, + { + "prompt_type": "plain", + "instruction": ": What does min_rows_per_class do? : min rows per class config.toml: Minimum required number of rows (in the training data) for each class label for classification problems." + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_rows_per_class. : min rows per class config.toml: Minimum required number of rows (in the training data) for each class label for classification problems." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_rows_per_class", + "output": "min rows per class config.toml: Minimum required number of rows (in the training data) for each class label for classification problems." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_rows_per_class", + "output": "min rows per class config.toml: Minimum required number of rows (in the training data) for each class label for classification problems." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min rows per class", + "output": "min rows per class config.toml: Minimum required number of rows (in the training data) for each class label for classification problems." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "min rows per class config.toml: Minimum required number of rows (in the training data) for each class label for classification problems." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_rows_per_class", + "output": "min rows per class config.toml: Minimum required number of rows (in the training data) for each class label for classification problems." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_rows_per_class", + "output": "min rows per class config.toml: Minimum required number of rows (in the training data) for each class label for classification problems." + }, + { + "prompt_type": "plain", + "instruction": ": What does min_rows_per_split do? : min rows per split config.toml: Minimum required number of rows for each split when generating validation samples." + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_rows_per_split. : min rows per split config.toml: Minimum required number of rows for each split when generating validation samples." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_rows_per_split", + "output": "min rows per split config.toml: Minimum required number of rows for each split when generating validation samples." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_rows_per_split", + "output": "min rows per split config.toml: Minimum required number of rows for each split when generating validation samples." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min rows per split", + "output": "min rows per split config.toml: Minimum required number of rows for each split when generating validation samples." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "min rows per split config.toml: Minimum required number of rows for each split when generating validation samples." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_rows_per_split", + "output": "min rows per split config.toml: Minimum required number of rows for each split when generating validation samples." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_rows_per_split", + "output": "min rows per split config.toml: Minimum required number of rows for each split when generating validation samples." + }, + { + "prompt_type": "plain", + "instruction": ": What does reproducibility_level do? : reproducibility level config.toml: Level of reproducibility desired (for same data and same inputs).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API).Supported levels are: reproducibility_level = 1 for same experiment results as long as same O/S, same CPU(s) and same GPU(s) reproducibility_level = 2 for same experiment results as long as same O/S, same CPU architecture and same GPU architecture reproducibility_level = 3 for same experiment results as long as same O/S, same CPU architecture, not using GPUs reproducibility_level = 4 for same experiment results as long as same O/S, (best effort) " + }, + { + "prompt_type": "plain", + "instruction": ": Explain reproducibility_level. 
: reproducibility level config.toml: Level of reproducibility desired (for same data and same inputs).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API).Supported levels are: reproducibility_level = 1 for same experiment results as long as same O/S, same CPU(s) and same GPU(s) reproducibility_level = 2 for same experiment results as long as same O/S, same CPU architecture and same GPU architecture reproducibility_level = 3 for same experiment results as long as same O/S, same CPU architecture, not using GPUs reproducibility_level = 4 for same experiment results as long as same O/S, (best effort) " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Reproducibility Level: . : Set the reproducibility level config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "reproducibility_level", + "output": "reproducibility level config.toml: Level of reproducibility desired (for same data and same inputs).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API).Supported levels are: reproducibility_level = 1 for same experiment results as long as same O/S, same CPU(s) and same GPU(s) reproducibility_level = 2 for same experiment results as long as same O/S, same CPU architecture and same GPU architecture reproducibility_level = 3 for same experiment results as long as same O/S, same CPU architecture, not using GPUs reproducibility_level = 4 for same experiment results as long as same O/S, (best effort) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "reproducibility_level", + "output": "reproducibility level config.toml: Reproducibility Level: Level of reproducibility desired (for same data and same inputs).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the 
client API).Supported levels are: reproducibility_level = 1 for same experiment results as long as same O/S, same CPU(s) and same GPU(s) reproducibility_level = 2 for same experiment results as long as same O/S, same CPU architecture and same GPU architecture reproducibility_level = 3 for same experiment results as long as same O/S, same CPU architecture, not using GPUs reproducibility_level = 4 for same experiment results as long as same O/S, (best effort) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "reproducibility level", + "output": "reproducibility level config.toml: Reproducibility Level: Level of reproducibility desired (for same data and same inputs).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API).Supported levels are: reproducibility_level = 1 for same experiment results as long as same O/S, same CPU(s) and same GPU(s) reproducibility_level = 2 for same experiment results as long as same O/S, same CPU architecture and same GPU architecture reproducibility_level = 3 for same experiment results as long as same O/S, same CPU architecture, not using GPUs reproducibility_level = 4 for same experiment results as long as same O/S, (best effort) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Reproducibility Level: ", + "output": "reproducibility level config.toml: Reproducibility Level: Level of reproducibility desired (for same data and same inputs).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API).Supported levels are: reproducibility_level = 1 for same experiment results as long as same O/S, same CPU(s) and same GPU(s) reproducibility_level = 2 for same experiment results as long as same O/S, same CPU architecture and same GPU architecture reproducibility_level = 3 for same experiment results 
as long as same O/S, same CPU architecture, not using GPUs reproducibility_level = 4 for same experiment results as long as same O/S, (best effort) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting reproducibility_level", + "output": "reproducibility level config.toml: Level of reproducibility desired (for same data and same inputs).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API).Supported levels are: reproducibility_level = 1 for same experiment results as long as same O/S, same CPU(s) and same GPU(s) reproducibility_level = 2 for same experiment results as long as same O/S, same CPU architecture and same GPU architecture reproducibility_level = 3 for same experiment results as long as same O/S, same CPU architecture, not using GPUs reproducibility_level = 4 for same experiment results as long as same O/S, (best effort) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting reproducibility_level", + "output": "reproducibility level config.toml: Reproducibility Level: Level of reproducibility desired (for same data and same inputs).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API).Supported levels are: reproducibility_level = 1 for same experiment results as long as same O/S, same CPU(s) and same GPU(s) reproducibility_level = 2 for same experiment results as long as same O/S, same CPU architecture and same GPU architecture reproducibility_level = 3 for same experiment results as long as same O/S, same CPU architecture, not using GPUs reproducibility_level = 4 for same experiment results as long as same O/S, (best effort) " + }, + { + "prompt_type": "plain", + "instruction": ": What does seed do? 
: seed config.toml: Seed for random number generator to make experiments reproducible, to a certain reproducibility level (see above).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API). " + }, + { + "prompt_type": "plain", + "instruction": ": Explain seed. : seed config.toml: Seed for random number generator to make experiments reproducible, to a certain reproducibility level (see above).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API). " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Random seed: . : Set the seed config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "seed", + "output": "seed config.toml: Seed for random number generator to make experiments reproducible, to a certain reproducibility level (see above).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "seed", + "output": "seed config.toml: Random seed: Seed for random number generator to make experiments reproducible, to a certain reproducibility level (see above).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "seed", + "output": "seed config.toml: Random seed: Seed for random number generator to make experiments reproducible, to a certain reproducibility level (see above).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API). 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Random seed: ", + "output": "seed config.toml: Random seed: Seed for random number generator to make experiments reproducible, to a certain reproducibility level (see above).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting seed", + "output": "seed config.toml: Seed for random number generator to make experiments reproducible, to a certain reproducibility level (see above).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting seed", + "output": "seed config.toml: Random seed: Seed for random number generator to make experiments reproducible, to a certain reproducibility level (see above).Only active if 'reproducible' mode is enabled (GUI button enabled or a seed is set from the client API). " + }, + { + "prompt_type": "plain", + "instruction": ": What does missing_values do? : missing values config.toml: The list of values that should be interpreted as missing values during data import. This applies to both numeric and string columns. Note that the dataset must be reloaded after applying changes to this config via the expert settings. Also note that 'nan' is always interpreted as a missing value for numeric columns." + }, + { + "prompt_type": "plain", + "instruction": ": Explain missing_values. : missing values config.toml: The list of values that should be interpreted as missing values during data import. This applies to both numeric and string columns. Note that the dataset must be reloaded after applying changes to this config via the expert settings. 
Also note that 'nan' is always interpreted as a missing value for numeric columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "missing_values", + "output": "missing values config.toml: The list of values that should be interpreted as missing values during data import. This applies to both numeric and string columns. Note that the dataset must be reloaded after applying changes to this config via the expert settings. Also note that 'nan' is always interpreted as a missing value for numeric columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "missing_values", + "output": "missing values config.toml: The list of values that should be interpreted as missing values during data import. This applies to both numeric and string columns. Note that the dataset must be reloaded after applying changes to this config via the expert settings. Also note that 'nan' is always interpreted as a missing value for numeric columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "missing values", + "output": "missing values config.toml: The list of values that should be interpreted as missing values during data import. This applies to both numeric and string columns. Note that the dataset must be reloaded after applying changes to this config via the expert settings. Also note that 'nan' is always interpreted as a missing value for numeric columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "missing values config.toml: The list of values that should be interpreted as missing values during data import. This applies to both numeric and string columns. Note that the dataset must be reloaded after applying changes to this config via the expert settings. 
Also note that 'nan' is always interpreted as a missing value for numeric columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting missing_values", + "output": "missing values config.toml: The list of values that should be interpreted as missing values during data import. This applies to both numeric and string columns. Note that the dataset must be reloaded after applying changes to this config via the expert settings. Also note that 'nan' is always interpreted as a missing value for numeric columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting missing_values", + "output": "missing values config.toml: The list of values that should be interpreted as missing values during data import. This applies to both numeric and string columns. Note that the dataset must be reloaded after applying changes to this config via the expert settings. Also note that 'nan' is always interpreted as a missing value for numeric columns." + }, + { + "prompt_type": "plain", + "instruction": ": What does glm_nan_impute_training_data do? : glm nan impute training data config.toml: Whether to impute (to mean) for GLM on training data." + }, + { + "prompt_type": "plain", + "instruction": ": Explain glm_nan_impute_training_data. : glm nan impute training data config.toml: Whether to impute (to mean) for GLM on training data." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm_nan_impute_training_data", + "output": "glm nan impute training data config.toml: Whether to impute (to mean) for GLM on training data." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm_nan_impute_training_data", + "output": "glm nan impute training data config.toml: Whether to impute (to mean) for GLM on training data." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm nan impute training data", + "output": "glm nan impute training data config.toml: Whether to impute (to mean) for GLM on training data." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "glm nan impute training data config.toml: Whether to impute (to mean) for GLM on training data." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting glm_nan_impute_training_data", + "output": "glm nan impute training data config.toml: Whether to impute (to mean) for GLM on training data." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting glm_nan_impute_training_data", + "output": "glm nan impute training data config.toml: Whether to impute (to mean) for GLM on training data." + }, + { + "prompt_type": "plain", + "instruction": ": What does glm_nan_impute_validation_data do? : glm nan impute validation data config.toml: Whether to impute (to mean) for GLM on validation data." + }, + { + "prompt_type": "plain", + "instruction": ": Explain glm_nan_impute_validation_data. : glm nan impute validation data config.toml: Whether to impute (to mean) for GLM on validation data." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm_nan_impute_validation_data", + "output": "glm nan impute validation data config.toml: Whether to impute (to mean) for GLM on validation data." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm_nan_impute_validation_data", + "output": "glm nan impute validation data config.toml: Whether to impute (to mean) for GLM on validation data." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm nan impute validation data", + "output": "glm nan impute validation data config.toml: Whether to impute (to mean) for GLM on validation data." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "glm nan impute validation data config.toml: Whether to impute (to mean) for GLM on validation data." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting glm_nan_impute_validation_data", + "output": "glm nan impute validation data config.toml: Whether to impute (to mean) for GLM on validation data." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting glm_nan_impute_validation_data", + "output": "glm nan impute validation data config.toml: Whether to impute (to mean) for GLM on validation data." + }, + { + "prompt_type": "plain", + "instruction": ": What does glm_nan_impute_prediction_data do? : glm nan impute prediction data config.toml: Whether to impute (to mean) for GLM on prediction data (required for consistency with MOJO)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain glm_nan_impute_prediction_data. : glm nan impute prediction data config.toml: Whether to impute (to mean) for GLM on prediction data (required for consistency with MOJO)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm_nan_impute_prediction_data", + "output": "glm nan impute prediction data config.toml: Whether to impute (to mean) for GLM on prediction data (required for consistency with MOJO)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm_nan_impute_prediction_data", + "output": "glm nan impute prediction data config.toml: Whether to impute (to mean) for GLM on prediction data (required for consistency with MOJO)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm nan impute prediction data", + "output": "glm nan impute prediction data config.toml: Whether to impute (to mean) for GLM on prediction data (required for consistency with MOJO)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "glm nan impute prediction data config.toml: Whether to impute (to mean) for GLM on prediction data (required for consistency with MOJO)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting glm_nan_impute_prediction_data", + "output": "glm nan impute prediction data config.toml: Whether to impute (to mean) for GLM on prediction data (required for consistency with MOJO)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting glm_nan_impute_prediction_data", + "output": "glm nan impute prediction data config.toml: Whether to impute (to mean) for GLM on prediction data (required for consistency with MOJO)." + }, + { + "prompt_type": "plain", + "instruction": ": What does tf_nan_impute_value do? : tf nan impute value config.toml: For tensorflow, what numerical value to give to missing values, where numeric values are standardized. So 0 is center of distribution, and if Normal distribution then +-5 is 5 standard deviations away from the center. In many cases, an out of bounds value is a good way to represent missings, but in some cases the mean (0) may be better." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain tf_nan_impute_value. : tf nan impute value config.toml: For tensorflow, what numerical value to give to missing values, where numeric values are standardized. So 0 is center of distribution, and if Normal distribution then +-5 is 5 standard deviations away from the center. In many cases, an out of bounds value is a good way to represent missings, but in some cases the mean (0) may be better." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tf_nan_impute_value", + "output": "tf nan impute value config.toml: For tensorflow, what numerical value to give to missing values, where numeric values are standardized. So 0 is center of distribution, and if Normal distribution then +-5 is 5 standard deviations away from the center. In many cases, an out of bounds value is a good way to represent missings, but in some cases the mean (0) may be better." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tf_nan_impute_value", + "output": "tf nan impute value config.toml: For tensorflow, what numerical value to give to missing values, where numeric values are standardized. So 0 is center of distribution, and if Normal distribution then +-5 is 5 standard deviations away from the center. In many cases, an out of bounds value is a good way to represent missings, but in some cases the mean (0) may be better." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tf nan impute value", + "output": "tf nan impute value config.toml: For tensorflow, what numerical value to give to missing values, where numeric values are standardized. So 0 is center of distribution, and if Normal distribution then +-5 is 5 standard deviations away from the center. 
In many cases, an out of bounds value is a good way to represent missings, but in some cases the mean (0) may be better." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tf nan impute value config.toml: For tensorflow, what numerical value to give to missing values, where numeric values are standardized. So 0 is center of distribution, and if Normal distribution then +-5 is 5 standard deviations away from the center. In many cases, an out of bounds value is a good way to represent missings, but in some cases the mean (0) may be better." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tf_nan_impute_value", + "output": "tf nan impute value config.toml: For tensorflow, what numerical value to give to missing values, where numeric values are standardized. So 0 is center of distribution, and if Normal distribution then +-5 is 5 standard deviations away from the center. In many cases, an out of bounds value is a good way to represent missings, but in some cases the mean (0) may be better." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tf_nan_impute_value", + "output": "tf nan impute value config.toml: For tensorflow, what numerical value to give to missing values, where numeric values are standardized. So 0 is center of distribution, and if Normal distribution then +-5 is 5 standard deviations away from the center. In many cases, an out of bounds value is a good way to represent missings, but in some cases the mean (0) may be better." + }, + { + "prompt_type": "plain", + "instruction": ": What does statistical_threshold_data_size_small do? 
: statistical threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (small data recipe like including one hot encoding for all model types, and smaller learning rate) to increase model accuracy" + }, + { + "prompt_type": "plain", + "instruction": ": Explain statistical_threshold_data_size_small. : statistical threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (small data recipe like including one hot encoding for all model types, and smaller learning rate) to increase model accuracy" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "statistical_threshold_data_size_small", + "output": "statistical threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (small data recipe like including one hot encoding for all model types, and smaller learning rate) to increase model accuracy" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "statistical_threshold_data_size_small", + "output": "statistical threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (small data recipe like including one hot encoding for all model types, and smaller learning rate) to increase model accuracy" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "statistical threshold data size small", + "output": "statistical threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (small data recipe like including one hot encoding for all model types, and smaller learning 
rate) to increase model accuracy" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "statistical threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (small data recipe like including one hot encoding for all model types, and smaller learning rate) to increase model accuracy" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting statistical_threshold_data_size_small", + "output": "statistical threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (small data recipe like including one hot encoding for all model types, and smaller learning rate) to increase model accuracy" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting statistical_threshold_data_size_small", + "output": "statistical threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (small data recipe like including one hot encoding for all model types, and smaller learning rate) to increase model accuracy" + }, + { + "prompt_type": "plain", + "instruction": ": What does statistical_threshold_data_size_large do? : statistical threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (fewer genes created, removal of high max_depth for tree models, etc.) that can speed up modeling. Also controls maximum rows used in training final model, by sampling statistical_threshold_data_size_large / columns number of rows" + }, + { + "prompt_type": "plain", + "instruction": ": Explain statistical_threshold_data_size_large. 
: statistical threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (fewer genes created, removal of high max_depth for tree models, etc.) that can speed up modeling. Also controls maximum rows used in training final model, by sampling statistical_threshold_data_size_large / columns number of rows" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "statistical_threshold_data_size_large", + "output": "statistical threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (fewer genes created, removal of high max_depth for tree models, etc.) that can speed up modeling. Also controls maximum rows used in training final model, by sampling statistical_threshold_data_size_large / columns number of rows" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "statistical_threshold_data_size_large", + "output": "statistical threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (fewer genes created, removal of high max_depth for tree models, etc.) that can speed up modeling. Also controls maximum rows used in training final model, by sampling statistical_threshold_data_size_large / columns number of rows" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "statistical threshold data size large", + "output": "statistical threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (fewer genes created, removal of high max_depth for tree models, etc.) that can speed up modeling. 
Also controls maximum rows used in training final model, by sampling statistical_threshold_data_size_large / columns number of rows" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "statistical threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (fewer genes created, removal of high max_depth for tree models, etc.) that can speed up modeling. Also controls maximum rows used in training final model, by sampling statistical_threshold_data_size_large / columns number of rows" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting statistical_threshold_data_size_large", + "output": "statistical threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (fewer genes created, removal of high max_depth for tree models, etc.) that can speed up modeling. Also controls maximum rows used in training final model, by sampling statistical_threshold_data_size_large / columns number of rows" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting statistical_threshold_data_size_large", + "output": "statistical threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain statistical techniques (fewer genes created, removal of high max_depth for tree models, etc.) that can speed up modeling. Also controls maximum rows used in training final model, by sampling statistical_threshold_data_size_large / columns number of rows" + }, + { + "prompt_type": "plain", + "instruction": ": What does aux_threshold_data_size_large do? 
: aux threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger sampling for auxiliary data uses, like imbalanced data set detection and bootstrap scoring sample size and iterations" + }, + { + "prompt_type": "plain", + "instruction": ": Explain aux_threshold_data_size_large. : aux threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger sampling for auxiliary data uses, like imbalanced data set detection and bootstrap scoring sample size and iterations" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aux_threshold_data_size_large", + "output": "aux threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger sampling for auxiliary data uses, like imbalanced data set detection and bootstrap scoring sample size and iterations" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aux_threshold_data_size_large", + "output": "aux threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger sampling for auxiliary data uses, like imbalanced data set detection and bootstrap scoring sample size and iterations" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aux threshold data size large", + "output": "aux threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger sampling for auxiliary data uses, like imbalanced data set detection and bootstrap scoring sample size and iterations" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "aux threshold data size large config.toml: Internal threshold for number of rows x number 
of columns to trigger sampling for auxiliary data uses, like imbalanced data set detection and bootstrap scoring sample size and iterations" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting aux_threshold_data_size_large", + "output": "aux threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger sampling for auxiliary data uses, like imbalanced data set detection and bootstrap scoring sample size and iterations" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting aux_threshold_data_size_large", + "output": "aux threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger sampling for auxiliary data uses, like imbalanced data set detection and bootstrap scoring sample size and iterations" + }, + { + "prompt_type": "plain", + "instruction": ": What does set_method_sampling_row_limit do? : set method sampling row limit config.toml: Internal threshold for set-based method for sampling without replacement. Can be 10x faster than np_random_choice internal optimized method, and up to 30x faster than np.random.choice to sample 250k rows from 1B rows etc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain set_method_sampling_row_limit. : set method sampling row limit config.toml: Internal threshold for set-based method for sampling without replacement. Can be 10x faster than np_random_choice internal optimized method, and up to 30x faster than np.random.choice to sample 250k rows from 1B rows etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "set_method_sampling_row_limit", + "output": "set method sampling row limit config.toml: Internal threshold for set-based method for sampling without replacement. 
Can be 10x faster than np_random_choice internal optimized method, and up to 30x faster than np.random.choice to sample 250k rows from 1B rows etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "set_method_sampling_row_limit", + "output": "set method sampling row limit config.toml: Internal threshold for set-based method for sampling without replacement. Can be 10x faster than np_random_choice internal optimized method, and up to 30x faster than np.random.choice to sample 250k rows from 1B rows etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "set method sampling row limit", + "output": "set method sampling row limit config.toml: Internal threshold for set-based method for sampling without replacement. Can be 10x faster than np_random_choice internal optimized method, and up to 30x faster than np.random.choice to sample 250k rows from 1B rows etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "set method sampling row limit config.toml: Internal threshold for set-based method for sampling without replacement. Can be 10x faster than np_random_choice internal optimized method, and up to 30x faster than np.random.choice to sample 250k rows from 1B rows etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting set_method_sampling_row_limit", + "output": "set method sampling row limit config.toml: Internal threshold for set-based method for sampling without replacement. Can be 10x faster than np_random_choice internal optimized method, and up to 30x faster than np.random.choice to sample 250k rows from 1B rows etc." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting set_method_sampling_row_limit", + "output": "set method sampling row limit config.toml: Internal threshold for set-based method for sampling without replacement. Can be 10x faster than np_random_choice internal optimized method, and up to 30x faster than np.random.choice to sample 250k rows from 1B rows etc." + }, + { + "prompt_type": "plain", + "instruction": ": What does performance_threshold_data_size_small do? : performance threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "plain", + "instruction": ": Explain performance_threshold_data_size_small. : performance threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "performance_threshold_data_size_small", + "output": "performance threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "performance_threshold_data_size_small", + "output": "performance threshold data size small config.toml: Internal threshold for number of 
rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "performance threshold data size small", + "output": "performance threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "performance threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting performance_threshold_data_size_small", + "output": "performance threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting performance_threshold_data_size_small", + "output": "performance threshold data size small config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM 
or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "plain", + "instruction": ": What does performance_threshold_data_size_large do? : performance threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "plain", + "instruction": ": Explain performance_threshold_data_size_large. : performance threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "performance_threshold_data_size_large", + "output": "performance threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "performance_threshold_data_size_large", + "output": "performance threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting 
for Driverless AI", + "input": "performance threshold data size large", + "output": "performance threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "performance threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting performance_threshold_data_size_large", + "output": "performance threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting performance_threshold_data_size_large", + "output": "performance threshold data size large config.toml: Internal threshold for number of rows x number of columns to trigger certain changes in performance (fewer threads if beyond large value) to help avoid OOM or unnecessary slowdowns (fewer threads if lower than small value) to avoid excess forking of tasks" + }, + { + "prompt_type": "plain", + "instruction": ": What does gpu_default_threshold_data_size_large do? 
: gpu default threshold data size large config.toml: Threshold for number of rows x number of columns to trigger GPU to be default for models like XGBoost GBM." + }, + { + "prompt_type": "plain", + "instruction": ": Explain gpu_default_threshold_data_size_large. : gpu default threshold data size large config.toml: Threshold for number of rows x number of columns to trigger GPU to be default for models like XGBoost GBM." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu_default_threshold_data_size_large", + "output": "gpu default threshold data size large config.toml: Threshold for number of rows x number of columns to trigger GPU to be default for models like XGBoost GBM." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu_default_threshold_data_size_large", + "output": "gpu default threshold data size large config.toml: Threshold for number of rows x number of columns to trigger GPU to be default for models like XGBoost GBM." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu default threshold data size large", + "output": "gpu default threshold data size large config.toml: Threshold for number of rows x number of columns to trigger GPU to be default for models like XGBoost GBM." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "gpu default threshold data size large config.toml: Threshold for number of rows x number of columns to trigger GPU to be default for models like XGBoost GBM." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting gpu_default_threshold_data_size_large", + "output": "gpu default threshold data size large config.toml: Threshold for number of rows x number of columns to trigger GPU to be default for models like XGBoost GBM." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting gpu_default_threshold_data_size_large", + "output": "gpu default threshold data size large config.toml: Threshold for number of rows x number of columns to trigger GPU to be default for models like XGBoost GBM." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_relative_cols_mismatch_allowed do? : max relative cols mismatch allowed config.toml: Maximum fraction of mismatched columns to allow between train and either valid or test. Beyond this value the experiment will fail with invalid data error." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_relative_cols_mismatch_allowed. : max relative cols mismatch allowed config.toml: Maximum fraction of mismatched columns to allow between train and either valid or test. Beyond this value the experiment will fail with invalid data error." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_relative_cols_mismatch_allowed", + "output": "max relative cols mismatch allowed config.toml: Maximum fraction of mismatched columns to allow between train and either valid or test. Beyond this value the experiment will fail with invalid data error." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_relative_cols_mismatch_allowed", + "output": "max relative cols mismatch allowed config.toml: Maximum fraction of mismatched columns to allow between train and either valid or test. 
Beyond this value the experiment will fail with invalid data error." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max relative cols mismatch allowed", + "output": "max relative cols mismatch allowed config.toml: Maximum fraction of mismatched columns to allow between train and either valid or test. Beyond this value the experiment will fail with invalid data error." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max relative cols mismatch allowed config.toml: Maximum fraction of mismatched columns to allow between train and either valid or test. Beyond this value the experiment will fail with invalid data error." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_relative_cols_mismatch_allowed", + "output": "max relative cols mismatch allowed config.toml: Maximum fraction of mismatched columns to allow between train and either valid or test. Beyond this value the experiment will fail with invalid data error." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_relative_cols_mismatch_allowed", + "output": "max relative cols mismatch allowed config.toml: Maximum fraction of mismatched columns to allow between train and either valid or test. Beyond this value the experiment will fail with invalid data error." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_wide_rules do? : enable wide rules config.toml: Enable various rules to handle wide (Num. columns > Num. rows) datasets ('auto'/'on'/'off'). Setting on forces rules to be enabled regardless of columns." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_wide_rules. : enable wide rules config.toml: Enable various rules to handle wide (Num. columns > Num. 
rows) datasets ('auto'/'on'/'off'). Setting on forces rules to be enabled regardless of columns." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable Wide Rules: . : Set the enable wide rules config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_wide_rules", + "output": "enable wide rules config.toml: Enable various rules to handle wide (Num. columns > Num. rows) datasets ('auto'/'on'/'off'). Setting on forces rules to be enabled regardless of columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_wide_rules", + "output": "enable wide rules config.toml: Enable Wide Rules: Enable various rules to handle wide (Num. columns > Num. rows) datasets ('auto'/'on'/'off'). Setting on forces rules to be enabled regardless of columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable wide rules", + "output": "enable wide rules config.toml: Enable Wide Rules: Enable various rules to handle wide (Num. columns > Num. rows) datasets ('auto'/'on'/'off'). Setting on forces rules to be enabled regardless of columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable Wide Rules: ", + "output": "enable wide rules config.toml: Enable Wide Rules: Enable various rules to handle wide (Num. columns > Num. rows) datasets ('auto'/'on'/'off'). Setting on forces rules to be enabled regardless of columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_wide_rules", + "output": "enable wide rules config.toml: Enable various rules to handle wide (Num. columns > Num. rows) datasets ('auto'/'on'/'off'). Setting on forces rules to be enabled regardless of columns." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_wide_rules", + "output": "enable wide rules config.toml: Enable Wide Rules: Enable various rules to handle wide (Num. columns > Num. rows) datasets ('auto'/'on'/'off'). Setting on forces rules to be enabled regardless of columns." + }, + { + "prompt_type": "plain", + "instruction": ": What does wide_factor do? : wide factor config.toml: If columns > wide_factor * rows, then enable wide rules if auto. For columns > rows, random forest is always enabled." + }, + { + "prompt_type": "plain", + "instruction": ": Explain wide_factor. : wide factor config.toml: If columns > wide_factor * rows, then enable wide rules if auto. For columns > rows, random forest is always enabled." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Wide rules factor: . : Set the wide factor config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wide_factor", + "output": "wide factor config.toml: If columns > wide_factor * rows, then enable wide rules if auto. For columns > rows, random forest is always enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wide_factor", + "output": "wide factor config.toml: Wide rules factor: If columns > wide_factor * rows, then enable wide rules if auto. For columns > rows, random forest is always enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wide factor", + "output": "wide factor config.toml: Wide rules factor: If columns > wide_factor * rows, then enable wide rules if auto. For columns > rows, random forest is always enabled." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Wide rules factor: ", + "output": "wide factor config.toml: Wide rules factor: If columns > wide_factor * rows, then enable wide rules if auto. For columns > rows, random forest is always enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting wide_factor", + "output": "wide factor config.toml: If columns > wide_factor * rows, then enable wide rules if auto. For columns > rows, random forest is always enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting wide_factor", + "output": "wide factor config.toml: Wide rules factor: If columns > wide_factor * rows, then enable wide rules if auto. For columns > rows, random forest is always enabled." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_cols do? : max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constrain the complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_cols. : max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constrain the complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cols", + "output": "max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constrain the complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cols", + "output": "max cols config.toml: Maximum number of columns to start an experiment. 
This threshold exists to constrain the complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max cols", + "output": "max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constrain the complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constrain the complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_cols", + "output": "max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constrain the complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_cols", + "output": "max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constrain the complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_rows_col_stats do? : max rows col stats config.toml: Largest number of rows to use for column stats, otherwise sample randomly" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_rows_col_stats. 
: max rows col stats config.toml: Largest number of rows to use for column stats, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_col_stats", + "output": "max rows col stats config.toml: Largest number of rows to use for column stats, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_col_stats", + "output": "max rows col stats config.toml: Largest number of rows to use for column stats, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max rows col stats", + "output": "max rows col stats config.toml: Largest number of rows to use for column stats, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max rows col stats config.toml: Largest number of rows to use for column stats, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_rows_col_stats", + "output": "max rows col stats config.toml: Largest number of rows to use for column stats, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_rows_col_stats", + "output": "max rows col stats config.toml: Largest number of rows to use for column stats, otherwise sample randomly" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_rows_cv_in_cv_gini do? : max rows cv in cv gini config.toml: Largest number of rows to use for cv in cv for target encoding when doing gini scoring test" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_rows_cv_in_cv_gini. 
: max rows cv in cv gini config.toml: Largest number of rows to use for cv in cv for target encoding when doing gini scoring test" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_cv_in_cv_gini", + "output": "max rows cv in cv gini config.toml: Largest number of rows to use for cv in cv for target encoding when doing gini scoring test" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_cv_in_cv_gini", + "output": "max rows cv in cv gini config.toml: Largest number of rows to use for cv in cv for target encoding when doing gini scoring test" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max rows cv in cv gini", + "output": "max rows cv in cv gini config.toml: Largest number of rows to use for cv in cv for target encoding when doing gini scoring test" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max rows cv in cv gini config.toml: Largest number of rows to use for cv in cv for target encoding when doing gini scoring test" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_rows_cv_in_cv_gini", + "output": "max rows cv in cv gini config.toml: Largest number of rows to use for cv in cv for target encoding when doing gini scoring test" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_rows_cv_in_cv_gini", + "output": "max rows cv in cv gini config.toml: Largest number of rows to use for cv in cv for target encoding when doing gini scoring test" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_rows_constant_model do? 
: max rows constant model config.toml: Largest number of rows to use for constant model fit, otherwise sample randomly" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_rows_constant_model. : max rows constant model config.toml: Largest number of rows to use for constant model fit, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_constant_model", + "output": "max rows constant model config.toml: Largest number of rows to use for constant model fit, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_constant_model", + "output": "max rows constant model config.toml: Largest number of rows to use for constant model fit, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max rows constant model", + "output": "max rows constant model config.toml: Largest number of rows to use for constant model fit, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max rows constant model config.toml: Largest number of rows to use for constant model fit, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_rows_constant_model", + "output": "max rows constant model config.toml: Largest number of rows to use for constant model fit, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_rows_constant_model", + "output": "max rows constant model config.toml: Largest number of rows to use for constant model fit, otherwise sample randomly" + }, + { + "prompt_type": 
"plain", + "instruction": ": What does max_rows_final_ensemble_base_model_fold_scores do? : max rows final ensemble base model fold scores config.toml: Largest number of rows to use for final ensemble base model fold scores, otherwise sample randomly" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_rows_final_ensemble_base_model_fold_scores. : max rows final ensemble base model fold scores config.toml: Largest number of rows to use for final ensemble base model fold scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_final_ensemble_base_model_fold_scores", + "output": "max rows final ensemble base model fold scores config.toml: Largest number of rows to use for final ensemble base model fold scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_final_ensemble_base_model_fold_scores", + "output": "max rows final ensemble base model fold scores config.toml: Largest number of rows to use for final ensemble base model fold scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max rows final ensemble base model fold scores", + "output": "max rows final ensemble base model fold scores config.toml: Largest number of rows to use for final ensemble base model fold scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max rows final ensemble base model fold scores config.toml: Largest number of rows to use for final ensemble base model fold scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting 
max_rows_final_ensemble_base_model_fold_scores", + "output": "max rows final ensemble base model fold scores config.toml: Largest number of rows to use for final ensemble base model fold scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_rows_final_ensemble_base_model_fold_scores", + "output": "max rows final ensemble base model fold scores config.toml: Largest number of rows to use for final ensemble base model fold scores, otherwise sample randomly" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_rows_final_blender do? : max rows final blender config.toml: Largest number of rows to use for final ensemble blender for regression and binary (scaled down linearly by number of classes for multiclass for >= 10 classes), otherwise sample randomly." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_rows_final_blender. : max rows final blender config.toml: Largest number of rows to use for final ensemble blender for regression and binary (scaled down linearly by number of classes for multiclass for >= 10 classes), otherwise sample randomly." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_final_blender", + "output": "max rows final blender config.toml: Largest number of rows to use for final ensemble blender for regression and binary (scaled down linearly by number of classes for multiclass for >= 10 classes), otherwise sample randomly." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_final_blender", + "output": "max rows final blender config.toml: Largest number of rows to use for final ensemble blender for regression and binary (scaled down linearly by number of classes for multiclass for >= 10 classes), otherwise sample randomly." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max rows final blender", + "output": "max rows final blender config.toml: Largest number of rows to use for final ensemble blender for regression and binary (scaled down linearly by number of classes for multiclass for >= 10 classes), otherwise sample randomly." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max rows final blender config.toml: Largest number of rows to use for final ensemble blender for regression and binary (scaled down linearly by number of classes for multiclass for >= 10 classes), otherwise sample randomly." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_rows_final_blender", + "output": "max rows final blender config.toml: Largest number of rows to use for final ensemble blender for regression and binary (scaled down linearly by number of classes for multiclass for >= 10 classes), otherwise sample randomly." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_rows_final_blender", + "output": "max rows final blender config.toml: Largest number of rows to use for final ensemble blender for regression and binary (scaled down linearly by number of classes for multiclass for >= 10 classes), otherwise sample randomly." + }, + { + "prompt_type": "plain", + "instruction": ": What does min_rows_final_blender do? : min rows final blender config.toml: Smallest number of rows (or number of rows if less than this) to use for final ensemble blender." + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_rows_final_blender. : min rows final blender config.toml: Smallest number of rows (or number of rows if less than this) to use for final ensemble blender." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_rows_final_blender", + "output": "min rows final blender config.toml: Smallest number of rows (or number of rows if less than this) to use for final ensemble blender." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_rows_final_blender", + "output": "min rows final blender config.toml: Smallest number of rows (or number of rows if less than this) to use for final ensemble blender." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min rows final blender", + "output": "min rows final blender config.toml: Smallest number of rows (or number of rows if less than this) to use for final ensemble blender." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "min rows final blender config.toml: Smallest number of rows (or number of rows if less than this) to use for final ensemble blender." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_rows_final_blender", + "output": "min rows final blender config.toml: Smallest number of rows (or number of rows if less than this) to use for final ensemble blender." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_rows_final_blender", + "output": "min rows final blender config.toml: Smallest number of rows (or number of rows if less than this) to use for final ensemble blender." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_rows_final_train_score do? 
: max rows final train score config.toml: Largest number of rows to use for final training score (no holdout), otherwise sample randomly" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_rows_final_train_score. : max rows final train score config.toml: Largest number of rows to use for final training score (no holdout), otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_final_train_score", + "output": "max rows final train score config.toml: Largest number of rows to use for final training score (no holdout), otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_final_train_score", + "output": "max rows final train score config.toml: Largest number of rows to use for final training score (no holdout), otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max rows final train score", + "output": "max rows final train score config.toml: Largest number of rows to use for final training score (no holdout), otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max rows final train score config.toml: Largest number of rows to use for final training score (no holdout), otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_rows_final_train_score", + "output": "max rows final train score config.toml: Largest number of rows to use for final training score (no holdout), otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_rows_final_train_score", + "output": "max 
rows final train score config.toml: Largest number of rows to use for final training score (no holdout), otherwise sample randomly" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_rows_final_roccmconf do? : max rows final roccmconf config.toml: Largest number of rows to use for final ROC, lift-gains, confusion matrix, residual, and actual vs. predicted. Otherwise sample randomly" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_rows_final_roccmconf. : max rows final roccmconf config.toml: Largest number of rows to use for final ROC, lift-gains, confusion matrix, residual, and actual vs. predicted. Otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_final_roccmconf", + "output": "max rows final roccmconf config.toml: Largest number of rows to use for final ROC, lift-gains, confusion matrix, residual, and actual vs. predicted. Otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_final_roccmconf", + "output": "max rows final roccmconf config.toml: Largest number of rows to use for final ROC, lift-gains, confusion matrix, residual, and actual vs. predicted. Otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max rows final roccmconf", + "output": "max rows final roccmconf config.toml: Largest number of rows to use for final ROC, lift-gains, confusion matrix, residual, and actual vs. predicted. Otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max rows final roccmconf config.toml: Largest number of rows to use for final ROC, lift-gains, confusion matrix, residual, and actual vs. predicted. 
Otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_rows_final_roccmconf", + "output": "max rows final roccmconf config.toml: Largest number of rows to use for final ROC, lift-gains, confusion matrix, residual, and actual vs. predicted. Otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_rows_final_roccmconf", + "output": "max rows final roccmconf config.toml: Largest number of rows to use for final ROC, lift-gains, confusion matrix, residual, and actual vs. predicted. Otherwise sample randomly" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_rows_final_holdout_score do? : max rows final holdout score config.toml: Largest number of rows to use for final holdout scores, otherwise sample randomly" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_rows_final_holdout_score. : max rows final holdout score config.toml: Largest number of rows to use for final holdout scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_final_holdout_score", + "output": "max rows final holdout score config.toml: Largest number of rows to use for final holdout scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_final_holdout_score", + "output": "max rows final holdout score config.toml: Largest number of rows to use for final holdout scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max rows final holdout score", + "output": "max rows final holdout score config.toml: Largest number of rows to use for final holdout scores, otherwise sample 
randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max rows final holdout score config.toml: Largest number of rows to use for final holdout scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_rows_final_holdout_score", + "output": "max rows final holdout score config.toml: Largest number of rows to use for final holdout scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_rows_final_holdout_score", + "output": "max rows final holdout score config.toml: Largest number of rows to use for final holdout scores, otherwise sample randomly" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_rows_final_holdout_bootstrap_score do? : max rows final holdout bootstrap score config.toml: Largest number of rows to use for final holdout bootstrap scores, otherwise sample randomly" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_rows_final_holdout_bootstrap_score. 
: max rows final holdout bootstrap score config.toml: Largest number of rows to use for final holdout bootstrap scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_final_holdout_bootstrap_score", + "output": "max rows final holdout bootstrap score config.toml: Largest number of rows to use for final holdout bootstrap scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_final_holdout_bootstrap_score", + "output": "max rows final holdout bootstrap score config.toml: Largest number of rows to use for final holdout bootstrap scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max rows final holdout bootstrap score", + "output": "max rows final holdout bootstrap score config.toml: Largest number of rows to use for final holdout bootstrap scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max rows final holdout bootstrap score config.toml: Largest number of rows to use for final holdout bootstrap scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_rows_final_holdout_bootstrap_score", + "output": "max rows final holdout bootstrap score config.toml: Largest number of rows to use for final holdout bootstrap scores, otherwise sample randomly" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_rows_final_holdout_bootstrap_score", + "output": "max rows final holdout bootstrap score config.toml: Largest number of rows to use for final holdout bootstrap scores, 
otherwise sample randomly" + }, + { + "prompt_type": "plain", + "instruction": ": What does orig_features_fs_report do? : orig features fs report config.toml: Whether to obtain permutation feature importance on original features for reporting in logs and summary zip file(as files with pattern fs_*.json or fs_*.tab.txt).This computes feature importance on a single un-tuned model(typically LightGBM with pre-defined un-tuned hyperparameters)and simple set of features (encoding typically is frequency encoding or target encoding).Features with low importance are automatically dropped if there are many original features,or a model with feature selection by permutation importance is created if interpretability is high enough in order to see if it gives a better score.One can manually drop low importance features, but this can be risky as transformers or hyperparameters might recovertheir usefulness.Permutation importance is obtained by:1) Transforming categoricals to frequency or target encoding features.2) Fitting that model on many folds, different data sizes, and slightly varying hyperparameters.3) Predicting on that model for each feature where each feature has its data shuffled.4) Computing the score on each shuffled prediction.5) Computing the difference between the unshuffled score and the shuffled score to arrive at a delta score6) The delta score becomes the variable importance once normalized by the maximum. Positive delta scores indicate the feature helped the model score, while negative delta scores indicate the feature hurt the model score. The normalized scores are stored in the fs_normalized_* files in the summary zip. 
The unnormalized scores (actual delta scores) are stored in the fs_unnormalized_* files in the summary zip.AutoDoc has a similar functionality of providing permutation importance on original features,where that takes the specific final model of an experiment and runs training data set through permutation importance to get original importance,so shuffling of original features is performed and the full pipeline is computed in each shuffled set of original features. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain orig_features_fs_report. : orig features fs report config.toml: Whether to obtain permutation feature importance on original features for reporting in logs and summary zip file(as files with pattern fs_*.json or fs_*.tab.txt).This computes feature importance on a single un-tuned model(typically LightGBM with pre-defined un-tuned hyperparameters)and simple set of features (encoding typically is frequency encoding or target encoding).Features with low importance are automatically dropped if there are many original features,or a model with feature selection by permutation importance is created if interpretability is high enough in order to see if it gives a better score.One can manually drop low importance features, but this can be risky as transformers or hyperparameters might recovertheir usefulness.Permutation importance is obtained by:1) Transforming categoricals to frequency or target encoding features.2) Fitting that model on many folds, different data sizes, and slightly varying hyperparameters.3) Predicting on that model for each feature where each feature has its data shuffled.4) Computing the score on each shuffled prediction.5) Computing the difference between the unshuffled score and the shuffled score to arrive at a delta score6) The delta score becomes the variable importance once normalized by the maximum. 
Positive delta scores indicate the feature helped the model score, while negative delta scores indicate the feature hurt the model score. The normalized scores are stored in the fs_normalized_* files in the summary zip. The unnormalized scores (actual delta scores) are stored in the fs_unnormalized_* files in the summary zip.AutoDoc has a similar functionality of providing permutation importance on original features,where that takes the specific final model of an experiment and runs training data set through permutation importance to get original importance,so shuffling of original features is performed and the full pipeline is computed in each shuffled set of original features. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Report permutation importance on original features: . : Set the orig features fs report config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "orig_features_fs_report", + "output": "orig features fs report config.toml: Whether to obtain permutation feature importance on original features for reporting in logs and summary zip file(as files with pattern fs_*.json or fs_*.tab.txt).This computes feature importance on a single un-tuned model(typically LightGBM with pre-defined un-tuned hyperparameters)and simple set of features (encoding typically is frequency encoding or target encoding).Features with low importance are automatically dropped if there are many original features,or a model with feature selection by permutation importance is created if interpretability is high enough in order to see if it gives a better score.One can manually drop low importance features, but this can be risky as transformers or hyperparameters might recovertheir usefulness.Permutation importance is obtained by:1) Transforming categoricals to frequency or target encoding features.2) Fitting that model on many folds, different data sizes, and slightly 
varying hyperparameters.3) Predicting on that model for each feature where each feature has its data shuffled.4) Computing the score on each shuffled prediction.5) Computing the difference between the unshuffled score and the shuffled score to arrive at a delta score6) The delta score becomes the variable importance once normalized by the maximum. Positive delta scores indicate the feature helped the model score, while negative delta scores indicate the feature hurt the model score. The normalized scores are stored in the fs_normalized_* files in the summary zip. The unnormalized scores (actual delta scores) are stored in the fs_unnormalized_* files in the summary zip.AutoDoc has a similar functionality of providing permutation importance on original features,where that takes the specific final model of an experiment and runs training data set through permutation importance to get original importance,so shuffling of original features is performed and the full pipeline is computed in each shuffled set of original features. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "orig_features_fs_report", + "output": "orig features fs report config.toml: Report permutation importance on original features: Whether to obtain permutation feature importance on original features for reporting in logs and summary zip file(as files with pattern fs_*.json or fs_*.tab.txt).This computes feature importance on a single un-tuned model(typically LightGBM with pre-defined un-tuned hyperparameters)and simple set of features (encoding typically is frequency encoding or target encoding).Features with low importance are automatically dropped if there are many original features,or a model with feature selection by permutation importance is created if interpretability is high enough in order to see if it gives a better score.One can manually drop low importance features, but this can be risky as transformers or hyperparameters might recovertheir usefulness.Permutation importance is obtained by:1) Transforming categoricals to frequency or target encoding features.2) Fitting that model on many folds, different data sizes, and slightly varying hyperparameters.3) Predicting on that model for each feature where each feature has its data shuffled.4) Computing the score on each shuffled prediction.5) Computing the difference between the unshuffled score and the shuffled score to arrive at a delta score6) The delta score becomes the variable importance once normalized by the maximum. Positive delta scores indicate the feature helped the model score, while negative delta scores indicate the feature hurt the model score. The normalized scores are stored in the fs_normalized_* files in the summary zip. 
The unnormalized scores (actual delta scores) are stored in the fs_unnormalized_* files in the summary zip.AutoDoc has a similar functionality of providing permutation importance on original features,where that takes the specific final model of an experiment and runs training data set through permutation importance to get original importance,so shuffling of original features is performed and the full pipeline is computed in each shuffled set of original features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "orig features fs report", + "output": "orig features fs report config.toml: Report permutation importance on original features: Whether to obtain permutation feature importance on original features for reporting in logs and summary zip file(as files with pattern fs_*.json or fs_*.tab.txt).This computes feature importance on a single un-tuned model(typically LightGBM with pre-defined un-tuned hyperparameters)and simple set of features (encoding typically is frequency encoding or target encoding).Features with low importance are automatically dropped if there are many original features,or a model with feature selection by permutation importance is created if interpretability is high enough in order to see if it gives a better score.One can manually drop low importance features, but this can be risky as transformers or hyperparameters might recovertheir usefulness.Permutation importance is obtained by:1) Transforming categoricals to frequency or target encoding features.2) Fitting that model on many folds, different data sizes, and slightly varying hyperparameters.3) Predicting on that model for each feature where each feature has its data shuffled.4) Computing the score on each shuffled prediction.5) Computing the difference between the unshuffled score and the shuffled score to arrive at a delta score6) The delta score becomes the variable importance once normalized by the maximum. 
Positive delta scores indicate the feature helped the model score, while negative delta scores indicate the feature hurt the model score. The normalized scores are stored in the fs_normalized_* files in the summary zip. The unnormalized scores (actual delta scores) are stored in the fs_unnormalized_* files in the summary zip.AutoDoc has a similar functionality of providing permutation importance on original features,where that takes the specific final model of an experiment and runs training data set through permutation importance to get original importance,so shuffling of original features is performed and the full pipeline is computed in each shuffled set of original features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Report permutation importance on original features: ", + "output": "orig features fs report config.toml: Report permutation importance on original features: Whether to obtain permutation feature importance on original features for reporting in logs and summary zip file(as files with pattern fs_*.json or fs_*.tab.txt).This computes feature importance on a single un-tuned model(typically LightGBM with pre-defined un-tuned hyperparameters)and simple set of features (encoding typically is frequency encoding or target encoding).Features with low importance are automatically dropped if there are many original features,or a model with feature selection by permutation importance is created if interpretability is high enough in order to see if it gives a better score.One can manually drop low importance features, but this can be risky as transformers or hyperparameters might recovertheir usefulness.Permutation importance is obtained by:1) Transforming categoricals to frequency or target encoding features.2) Fitting that model on many folds, different data sizes, and slightly varying hyperparameters.3) Predicting on that model for each feature where each feature has its data 
shuffled.4) Computing the score on each shuffled prediction.5) Computing the difference between the unshuffled score and the shuffled score to arrive at a delta score6) The delta score becomes the variable importance once normalized by the maximum. Positive delta scores indicate the feature helped the model score, while negative delta scores indicate the feature hurt the model score. The normalized scores are stored in the fs_normalized_* files in the summary zip. The unnormalized scores (actual delta scores) are stored in the fs_unnormalized_* files in the summary zip.AutoDoc has a similar functionality of providing permutation importance on original features,where that takes the specific final model of an experiment and runs training data set through permutation importance to get original importance,so shuffling of original features is performed and the full pipeline is computed in each shuffled set of original features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting orig_features_fs_report", + "output": "orig features fs report config.toml: Whether to obtain permutation feature importance on original features for reporting in logs and summary zip file(as files with pattern fs_*.json or fs_*.tab.txt).This computes feature importance on a single un-tuned model(typically LightGBM with pre-defined un-tuned hyperparameters)and simple set of features (encoding typically is frequency encoding or target encoding).Features with low importance are automatically dropped if there are many original features,or a model with feature selection by permutation importance is created if interpretability is high enough in order to see if it gives a better score.One can manually drop low importance features, but this can be risky as transformers or hyperparameters might recovertheir usefulness.Permutation importance is obtained by:1) Transforming categoricals to frequency or target encoding features.2) Fitting that model 
on many folds, different data sizes, and slightly varying hyperparameters.3) Predicting on that model for each feature where each feature has its data shuffled.4) Computing the score on each shuffled prediction.5) Computing the difference between the unshuffled score and the shuffled score to arrive at a delta score6) The delta score becomes the variable importance once normalized by the maximum. Positive delta scores indicate the feature helped the model score, while negative delta scores indicate the feature hurt the model score. The normalized scores are stored in the fs_normalized_* files in the summary zip. The unnormalized scores (actual delta scores) are stored in the fs_unnormalized_* files in the summary zip.AutoDoc has a similar functionality of providing permutation importance on original features,where that takes the specific final model of an experiment and runs training data set through permutation importance to get original importance,so shuffling of original features is performed and the full pipeline is computed in each shuffled set of original features. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting orig_features_fs_report", + "output": "orig features fs report config.toml: Report permutation importance on original features: Whether to obtain permutation feature importance on original features for reporting in logs and summary zip file(as files with pattern fs_*.json or fs_*.tab.txt).This computes feature importance on a single un-tuned model(typically LightGBM with pre-defined un-tuned hyperparameters)and simple set of features (encoding typically is frequency encoding or target encoding).Features with low importance are automatically dropped if there are many original features,or a model with feature selection by permutation importance is created if interpretability is high enough in order to see if it gives a better score.One can manually drop low importance features, but this can be risky as transformers or hyperparameters might recovertheir usefulness.Permutation importance is obtained by:1) Transforming categoricals to frequency or target encoding features.2) Fitting that model on many folds, different data sizes, and slightly varying hyperparameters.3) Predicting on that model for each feature where each feature has its data shuffled.4) Computing the score on each shuffled prediction.5) Computing the difference between the unshuffled score and the shuffled score to arrive at a delta score6) The delta score becomes the variable importance once normalized by the maximum. Positive delta scores indicate the feature helped the model score, while negative delta scores indicate the feature hurt the model score. The normalized scores are stored in the fs_normalized_* files in the summary zip. 
The unnormalized scores (actual delta scores) are stored in the fs_unnormalized_* files in the summary zip.AutoDoc has a similar functionality of providing permutation importance on original features,where that takes the specific final model of an experiment and runs training data set through permutation importance to get original importance,so shuffling of original features is performed and the full pipeline is computed in each shuffled set of original features. " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_rows_fs do? : max rows fs config.toml: Maximum number of rows when doing permutation feature importance, reduced by (stratified) random sampling. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_rows_fs. : max rows fs config.toml: Maximum number of rows when doing permutation feature importance, reduced by (stratified) random sampling. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum number of rows to perform permutation-based feature selection: . : Set the max rows fs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_fs", + "output": "max rows fs config.toml: Maximum number of rows when doing permutation feature importance, reduced by (stratified) random sampling. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_fs", + "output": "max rows fs config.toml: Maximum number of rows to perform permutation-based feature selection: Maximum number of rows when doing permutation feature importance, reduced by (stratified) random sampling. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max rows fs", + "output": "max rows fs config.toml: Maximum number of rows to perform permutation-based feature selection: Maximum number of rows when doing permutation feature importance, reduced by (stratified) random sampling. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of rows to perform permutation-based feature selection: ", + "output": "max rows fs config.toml: Maximum number of rows to perform permutation-based feature selection: Maximum number of rows when doing permutation feature importance, reduced by (stratified) random sampling. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_rows_fs", + "output": "max rows fs config.toml: Maximum number of rows when doing permutation feature importance, reduced by (stratified) random sampling. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_rows_fs", + "output": "max rows fs config.toml: Maximum number of rows to perform permutation-based feature selection: Maximum number of rows when doing permutation feature importance, reduced by (stratified) random sampling. " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_rows_leak do? : max rows leak config.toml: Max. rows for leakage detection if wide rules used on wide data: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_rows_leak. : max rows leak config.toml: Max. rows for leakage detection if wide rules used on wide data: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_leak", + "output": "max rows leak config.toml: Max. 
rows for leakage detection if wide rules used on wide data: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_leak", + "output": "max rows leak config.toml: Max. rows for leakage detection if wide rules used on wide data: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max rows leak", + "output": "max rows leak config.toml: Max. rows for leakage detection if wide rules used on wide data: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. rows for leakage detection if wide rules used on wide data: ", + "output": "max rows leak config.toml: Max. rows for leakage detection if wide rules used on wide data: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_rows_leak", + "output": "max rows leak config.toml: Max. rows for leakage detection if wide rules used on wide data: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_rows_leak", + "output": "max rows leak config.toml: Max. rows for leakage detection if wide rules used on wide data: " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_workers_fs do? : max workers fs config.toml: How many workers to use for feature selection by permutation for predict phase. (0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_workers_fs. : max workers fs config.toml: How many workers to use for feature selection by permutation for predict phase. (0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Num. 
simultaneous predictions for feature selection (0 = auto): . : Set the max workers fs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_workers_fs", + "output": "max workers fs config.toml: How many workers to use for feature selection by permutation for predict phase. (0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_workers_fs", + "output": "max workers fs config.toml: Num. simultaneous predictions for feature selection (0 = auto): How many workers to use for feature selection by permutation for predict phase. (0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max workers fs", + "output": "max workers fs config.toml: Num. simultaneous predictions for feature selection (0 = auto): How many workers to use for feature selection by permutation for predict phase. (0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Num. simultaneous predictions for feature selection (0 = auto): ", + "output": "max workers fs config.toml: Num. simultaneous predictions for feature selection (0 = auto): How many workers to use for feature selection by permutation for predict phase. (0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_workers_fs", + "output": "max workers fs config.toml: How many workers to use for feature selection by permutation for predict phase. 
(0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_workers_fs", + "output": "max workers fs config.toml: Num. simultaneous predictions for feature selection (0 = auto): How many workers to use for feature selection by permutation for predict phase. (0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_workers_shift_leak do? : max workers shift leak config.toml: How many workers to use for shift and leakage checks if using LightGBM on CPU. (0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_workers_shift_leak. : max workers shift leak config.toml: How many workers to use for shift and leakage checks if using LightGBM on CPU. (0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Num. simultaneous fits for shift and leak checks if using LightGBM on CPU (0 = auto): . : Set the max workers shift leak config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_workers_shift_leak", + "output": "max workers shift leak config.toml: How many workers to use for shift and leakage checks if using LightGBM on CPU. (0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_workers_shift_leak", + "output": "max workers shift leak config.toml: Num. 
simultaneous fits for shift and leak checks if using LightGBM on CPU (0 = auto): How many workers to use for shift and leakage checks if using LightGBM on CPU. (0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max workers shift leak", + "output": "max workers shift leak config.toml: Num. simultaneous fits for shift and leak checks if using LightGBM on CPU (0 = auto): How many workers to use for shift and leakage checks if using LightGBM on CPU. (0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Num. simultaneous fits for shift and leak checks if using LightGBM on CPU (0 = auto): ", + "output": "max workers shift leak config.toml: Num. simultaneous fits for shift and leak checks if using LightGBM on CPU (0 = auto): How many workers to use for shift and leakage checks if using LightGBM on CPU. (0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_workers_shift_leak", + "output": "max workers shift leak config.toml: How many workers to use for shift and leakage checks if using LightGBM on CPU. (0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_workers_shift_leak", + "output": "max workers shift leak config.toml: Num. simultaneous fits for shift and leak checks if using LightGBM on CPU (0 = auto): How many workers to use for shift and leakage checks if using LightGBM on CPU. 
(0 = auto, > 0: min of DAI value and this value, < 0: exactly negative of this value) " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_orig_cols_selected do? : max orig cols selected config.toml: Maximum number of columns selected out of original set of original columns, using feature selection.The selection is based upon how well target encoding (or frequency encoding if not available) on categoricals and numerics treated as categoricals.This is useful to reduce the final model complexity. First the best[max_orig_cols_selected] are found through feature selection methods and thenthese features are used in feature evolution (to derive other features) and in modelling. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_orig_cols_selected. : max orig cols selected config.toml: Maximum number of columns selected out of original set of original columns, using feature selection.The selection is based upon how well target encoding (or frequency encoding if not available) on categoricals and numerics treated as categoricals.This is useful to reduce the final model complexity. First the best[max_orig_cols_selected] are found through feature selection methods and thenthese features are used in feature evolution (to derive other features) and in modelling. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of original features used: . : Set the max orig cols selected config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_orig_cols_selected", + "output": "max orig cols selected config.toml: Maximum number of columns selected out of original set of original columns, using feature selection.The selection is based upon how well target encoding (or frequency encoding if not available) on categoricals and numerics treated as categoricals.This is useful to reduce the final model complexity. 
First the best[max_orig_cols_selected] are found through feature selection methods and then these features are used in feature evolution (to derive other features) and in modelling. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_orig_cols_selected", + "output": "max orig cols selected config.toml: Max. number of original features used: Maximum number of columns selected out of original set of original columns, using feature selection.The selection is based upon how well target encoding (or frequency encoding if not available) on categoricals and numerics treated as categoricals.This is useful to reduce the final model complexity. First the best[max_orig_cols_selected] are found through feature selection methods and then these features are used in feature evolution (to derive other features) and in modelling. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max orig cols selected", + "output": "max orig cols selected config.toml: Max. number of original features used: Maximum number of columns selected out of original set of original columns, using feature selection.The selection is based upon how well target encoding (or frequency encoding if not available) on categoricals and numerics treated as categoricals.This is useful to reduce the final model complexity. First the best[max_orig_cols_selected] are found through feature selection methods and then these features are used in feature evolution (to derive other features) and in modelling. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of original features used: ", + "output": "max orig cols selected config.toml: Max. 
number of original features used: Maximum number of columns selected out of original set of original columns, using feature selection.The selection is based upon how well target encoding (or frequency encoding if not available) on categoricals and numerics treated as categoricals.This is useful to reduce the final model complexity. First the best[max_orig_cols_selected] are found through feature selection methods and then these features are used in feature evolution (to derive other features) and in modelling. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_orig_cols_selected", + "output": "max orig cols selected config.toml: Maximum number of columns selected out of original set of original columns, using feature selection.The selection is based upon how well target encoding (or frequency encoding if not available) on categoricals and numerics treated as categoricals.This is useful to reduce the final model complexity. First the best[max_orig_cols_selected] are found through feature selection methods and then these features are used in feature evolution (to derive other features) and in modelling. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_orig_cols_selected", + "output": "max orig cols selected config.toml: Max. number of original features used: Maximum number of columns selected out of original set of original columns, using feature selection.The selection is based upon how well target encoding (or frequency encoding if not available) on categoricals and numerics treated as categoricals.This is useful to reduce the final model complexity. First the best[max_orig_cols_selected] are found through feature selection methods and then these features are used in feature evolution (to derive other features) and in modelling. " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_orig_numeric_cols_selected do? 
: max orig numeric cols selected config.toml: Maximum number of numeric columns selected, above which will do feature selection same as max_orig_cols_selected but for numeric columns." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_orig_numeric_cols_selected. : max orig numeric cols selected config.toml: Maximum number of numeric columns selected, above which will do feature selection same as max_orig_cols_selected but for numeric columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_orig_numeric_cols_selected", + "output": "max orig numeric cols selected config.toml: Maximum number of numeric columns selected, above which will do feature selection same as max_orig_cols_selected but for numeric columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_orig_numeric_cols_selected", + "output": "max orig numeric cols selected config.toml: Maximum number of numeric columns selected, above which will do feature selection same as max_orig_cols_selected but for numeric columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max orig numeric cols selected", + "output": "max orig numeric cols selected config.toml: Maximum number of numeric columns selected, above which will do feature selection same as max_orig_cols_selected but for numeric columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max orig numeric cols selected config.toml: Maximum number of numeric columns selected, above which will do feature selection same as max_orig_cols_selected but for numeric columns." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_orig_numeric_cols_selected", + "output": "max orig numeric cols selected config.toml: Maximum number of numeric columns selected, above which will do feature selection same as max_orig_cols_selected but for numeric columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_orig_numeric_cols_selected", + "output": "max orig numeric cols selected config.toml: Maximum number of numeric columns selected, above which will do feature selection same as max_orig_cols_selected but for numeric columns." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_orig_nonnumeric_cols_selected do? : max orig nonnumeric cols selected config.toml: Maximum number of non-numeric columns selected, above which will do feature selection on all features. Same as max_orig_numeric_cols_selected but for categorical columns.If set to -1, then auto mode which uses max_orig_nonnumeric_cols_selected_default, but then for small data can be increased up to 10x larger. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_orig_nonnumeric_cols_selected. : max orig nonnumeric cols selected config.toml: Maximum number of non-numeric columns selected, above which will do feature selection on all features. Same as max_orig_numeric_cols_selected but for categorical columns.If set to -1, then auto mode which uses max_orig_nonnumeric_cols_selected_default, but then for small data can be increased up to 10x larger. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of original non-numeric features: . 
: Set the max orig nonnumeric cols selected config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_orig_nonnumeric_cols_selected", + "output": "max orig nonnumeric cols selected config.toml: Maximum number of non-numeric columns selected, above which will do feature selection on all features. Same as max_orig_numeric_cols_selected but for categorical columns.If set to -1, then auto mode which uses max_orig_nonnumeric_cols_selected_default, but then for small data can be increased up to 10x larger. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_orig_nonnumeric_cols_selected", + "output": "max orig nonnumeric cols selected config.toml: Max. number of original non-numeric features: Maximum number of non-numeric columns selected, above which will do feature selection on all features. Same as max_orig_numeric_cols_selected but for categorical columns.If set to -1, then auto mode which uses max_orig_nonnumeric_cols_selected_default, but then for small data can be increased up to 10x larger. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max orig nonnumeric cols selected", + "output": "max orig nonnumeric cols selected config.toml: Max. number of original non-numeric features: Maximum number of non-numeric columns selected, above which will do feature selection on all features. Same as max_orig_numeric_cols_selected but for categorical columns.If set to -1, then auto mode which uses max_orig_nonnumeric_cols_selected_default, but then for small data can be increased up to 10x larger. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of original non-numeric features: ", + "output": "max orig nonnumeric cols selected config.toml: Max. 
number of original non-numeric features: Maximum number of non-numeric columns selected, above which will do feature selection on all features. Same as max_orig_numeric_cols_selected but for categorical columns.If set to -1, then auto mode which uses max_orig_nonnumeric_cols_selected_default, but then for small data can be increased up to 10x larger. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_orig_nonnumeric_cols_selected", + "output": "max orig nonnumeric cols selected config.toml: Maximum number of non-numeric columns selected, above which will do feature selection on all features. Same as max_orig_numeric_cols_selected but for categorical columns.If set to -1, then auto mode which uses max_orig_nonnumeric_cols_selected_default, but then for small data can be increased up to 10x larger. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_orig_nonnumeric_cols_selected", + "output": "max orig nonnumeric cols selected config.toml: Max. number of original non-numeric features: Maximum number of non-numeric columns selected, above which will do feature selection on all features. Same as max_orig_numeric_cols_selected but for categorical columns.If set to -1, then auto mode which uses max_orig_nonnumeric_cols_selected_default, but then for small data can be increased up to 10x larger. " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_orig_cols_selected_simple_factor do? : max orig cols selected simple factor config.toml: The factor times max_orig_cols_selected, by which column selection is based upon no target encoding and no treating numerical as categorical in order to limit performance cost of feature engineering" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_orig_cols_selected_simple_factor. 
: max orig cols selected simple factor config.toml: The factor times max_orig_cols_selected, by which column selection is based upon no target encoding and no treating numerical as categorical in order to limit performance cost of feature engineering" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_orig_cols_selected_simple_factor", + "output": "max orig cols selected simple factor config.toml: The factor times max_orig_cols_selected, by which column selection is based upon no target encoding and no treating numerical as categorical in order to limit performance cost of feature engineering" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_orig_cols_selected_simple_factor", + "output": "max orig cols selected simple factor config.toml: The factor times max_orig_cols_selected, by which column selection is based upon no target encoding and no treating numerical as categorical in order to limit performance cost of feature engineering" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max orig cols selected simple factor", + "output": "max orig cols selected simple factor config.toml: The factor times max_orig_cols_selected, by which column selection is based upon no target encoding and no treating numerical as categorical in order to limit performance cost of feature engineering" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max orig cols selected simple factor config.toml: The factor times max_orig_cols_selected, by which column selection is based upon no target encoding and no treating numerical as categorical in order to limit performance cost of feature engineering" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a 
short explanation of the expert setting max_orig_cols_selected_simple_factor", + "output": "max orig cols selected simple factor config.toml: The factor times max_orig_cols_selected, by which column selection is based upon no target encoding and no treating numerical as categorical in order to limit performance cost of feature engineering" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_orig_cols_selected_simple_factor", + "output": "max orig cols selected simple factor config.toml: The factor times max_orig_cols_selected, by which column selection is based upon no target encoding and no treating numerical as categorical in order to limit performance cost of feature engineering" + }, + { + "prompt_type": "plain", + "instruction": ": What does fs_orig_cols_selected do? : fs orig cols selected config.toml: Like max_orig_cols_selected, but columns above which add special individual with original columns reduced. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain fs_orig_cols_selected. : fs orig cols selected config.toml: Like max_orig_cols_selected, but columns above which add special individual with original columns reduced. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of original features used for FS individual: . : Set the fs orig cols selected config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs_orig_cols_selected", + "output": "fs orig cols selected config.toml: Like max_orig_cols_selected, but columns above which add special individual with original columns reduced. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs_orig_cols_selected", + "output": "fs orig cols selected config.toml: Max. 
number of original features used for FS individual: Like max_orig_cols_selected, but columns above which add special individual with original columns reduced. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs orig cols selected", + "output": "fs orig cols selected config.toml: Max. number of original features used for FS individual: Like max_orig_cols_selected, but columns above which add special individual with original columns reduced. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of original features used for FS individual: ", + "output": "fs orig cols selected config.toml: Max. number of original features used for FS individual: Like max_orig_cols_selected, but columns above which add special individual with original columns reduced. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fs_orig_cols_selected", + "output": "fs orig cols selected config.toml: Like max_orig_cols_selected, but columns above which add special individual with original columns reduced. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fs_orig_cols_selected", + "output": "fs orig cols selected config.toml: Max. number of original features used for FS individual: Like max_orig_cols_selected, but columns above which add special individual with original columns reduced. " + }, + { + "prompt_type": "plain", + "instruction": ": What does fs_orig_numeric_cols_selected do? : fs orig numeric cols selected config.toml: Like max_orig_numeric_cols_selected, but applicable to special individual with original columns reduced. A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. 
" + }, + { + "prompt_type": "plain", + "instruction": ": Explain fs_orig_numeric_cols_selected. : fs orig numeric cols selected config.toml: Like max_orig_numeric_cols_selected, but applicable to special individual with original columns reduced. A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Num. of original numeric features to trigger feature selection model type: . : Set the fs orig numeric cols selected config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs_orig_numeric_cols_selected", + "output": "fs orig numeric cols selected config.toml: Like max_orig_numeric_cols_selected, but applicable to special individual with original columns reduced. A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs_orig_numeric_cols_selected", + "output": "fs orig numeric cols selected config.toml: Num. of original numeric features to trigger feature selection model type: Like max_orig_numeric_cols_selected, but applicable to special individual with original columns reduced. A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs orig numeric cols selected", + "output": "fs orig numeric cols selected config.toml: Num. of original numeric features to trigger feature selection model type: Like max_orig_numeric_cols_selected, but applicable to special individual with original columns reduced. 
A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Num. of original numeric features to trigger feature selection model type: ", + "output": "fs orig numeric cols selected config.toml: Num. of original numeric features to trigger feature selection model type: Like max_orig_numeric_cols_selected, but applicable to special individual with original columns reduced. A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fs_orig_numeric_cols_selected", + "output": "fs orig numeric cols selected config.toml: Like max_orig_numeric_cols_selected, but applicable to special individual with original columns reduced. A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fs_orig_numeric_cols_selected", + "output": "fs orig numeric cols selected config.toml: Num. of original numeric features to trigger feature selection model type: Like max_orig_numeric_cols_selected, but applicable to special individual with original columns reduced. A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "plain", + "instruction": ": What does fs_orig_nonnumeric_cols_selected do? : fs orig nonnumeric cols selected config.toml: Like max_orig_nonnumeric_cols_selected, but applicable to special individual with original columns reduced. 
A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain fs_orig_nonnumeric_cols_selected. : fs orig nonnumeric cols selected config.toml: Like max_orig_nonnumeric_cols_selected, but applicable to special individual with original columns reduced. A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Num. of original non-numeric features to trigger feature selection model type: . : Set the fs orig nonnumeric cols selected config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs_orig_nonnumeric_cols_selected", + "output": "fs orig nonnumeric cols selected config.toml: Like max_orig_nonnumeric_cols_selected, but applicable to special individual with original columns reduced. A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs_orig_nonnumeric_cols_selected", + "output": "fs orig nonnumeric cols selected config.toml: Num. of original non-numeric features to trigger feature selection model type: Like max_orig_nonnumeric_cols_selected, but applicable to special individual with original columns reduced. A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs orig nonnumeric cols selected", + "output": "fs orig nonnumeric cols selected config.toml: Num. 
of original non-numeric features to trigger feature selection model type: Like max_orig_nonnumeric_cols_selected, but applicable to special individual with original columns reduced. A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Num. of original non-numeric features to trigger feature selection model type: ", + "output": "fs orig nonnumeric cols selected config.toml: Num. of original non-numeric features to trigger feature selection model type: Like max_orig_nonnumeric_cols_selected, but applicable to special individual with original columns reduced. A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fs_orig_nonnumeric_cols_selected", + "output": "fs orig nonnumeric cols selected config.toml: Like max_orig_nonnumeric_cols_selected, but applicable to special individual with original columns reduced. A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fs_orig_nonnumeric_cols_selected", + "output": "fs orig nonnumeric cols selected config.toml: Num. of original non-numeric features to trigger feature selection model type: Like max_orig_nonnumeric_cols_selected, but applicable to special individual with original columns reduced. A separate individual in the genetic algorithm is created by doing feature selection by permutation importance on original features. " + }, + { + "prompt_type": "plain", + "instruction": ": What does fs_orig_cols_selected_simple_factor do? 
: fs orig cols selected simple factor config.toml: Like max_orig_cols_selected_simple_factor, but applicable to special individual with original columns reduced." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fs_orig_cols_selected_simple_factor. : fs orig cols selected simple factor config.toml: Like max_orig_cols_selected_simple_factor, but applicable to special individual with original columns reduced." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs_orig_cols_selected_simple_factor", + "output": "fs orig cols selected simple factor config.toml: Like max_orig_cols_selected_simple_factor, but applicable to special individual with original columns reduced." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs_orig_cols_selected_simple_factor", + "output": "fs orig cols selected simple factor config.toml: Like max_orig_cols_selected_simple_factor, but applicable to special individual with original columns reduced." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs orig cols selected simple factor", + "output": "fs orig cols selected simple factor config.toml: Like max_orig_cols_selected_simple_factor, but applicable to special individual with original columns reduced." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "fs orig cols selected simple factor config.toml: Like max_orig_cols_selected_simple_factor, but applicable to special individual with original columns reduced." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fs_orig_cols_selected_simple_factor", + "output": "fs orig cols selected simple factor config.toml: Like max_orig_cols_selected_simple_factor, but applicable to special individual with original columns reduced." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fs_orig_cols_selected_simple_factor", + "output": "fs orig cols selected simple factor config.toml: Like max_orig_cols_selected_simple_factor, but applicable to special individual with original columns reduced." + }, + { + "prompt_type": "plain", + "instruction": ": What does predict_shuffle_inside_model do? : predict shuffle inside model config.toml: Allow supported models to do feature selection by permutation importance within model itself: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain predict_shuffle_inside_model. : predict shuffle inside model config.toml: Allow supported models to do feature selection by permutation importance within model itself: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "predict_shuffle_inside_model", + "output": "predict shuffle inside model config.toml: Allow supported models to do feature selection by permutation importance within model itself: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "predict_shuffle_inside_model", + "output": "predict shuffle inside model config.toml: Allow supported models to do feature selection by permutation importance within model itself: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "predict shuffle inside model", + "output": "predict shuffle inside model config.toml: Allow supported models to do feature selection by 
permutation importance within model itself: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Allow supported models to do feature selection by permutation importance within model itself: ", + "output": "predict shuffle inside model config.toml: Allow supported models to do feature selection by permutation importance within model itself: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting predict_shuffle_inside_model", + "output": "predict shuffle inside model config.toml: Allow supported models to do feature selection by permutation importance within model itself: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting predict_shuffle_inside_model", + "output": "predict shuffle inside model config.toml: Allow supported models to do feature selection by permutation importance within model itself: " + }, + { + "prompt_type": "plain", + "instruction": ": What does use_native_cats_for_lgbm_fs do? : use native cats for lgbm fs config.toml: Whether to use native categorical handling (CPU only) for LightGBM when doing feature selection by permutation: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain use_native_cats_for_lgbm_fs. 
: use native cats for lgbm fs config.toml: Whether to use native categorical handling (CPU only) for LightGBM when doing feature selection by permutation: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_native_cats_for_lgbm_fs", + "output": "use native cats for lgbm fs config.toml: Whether to use native categorical handling (CPU only) for LightGBM when doing feature selection by permutation: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_native_cats_for_lgbm_fs", + "output": "use native cats for lgbm fs config.toml: Whether to use native categorical handling (CPU only) for LightGBM when doing feature selection by permutation: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use native cats for lgbm fs", + "output": "use native cats for lgbm fs config.toml: Whether to use native categorical handling (CPU only) for LightGBM when doing feature selection by permutation: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to use native categorical handling (CPU only) for LightGBM when doing feature selection by permutation: ", + "output": "use native cats for lgbm fs config.toml: Whether to use native categorical handling (CPU only) for LightGBM when doing feature selection by permutation: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting use_native_cats_for_lgbm_fs", + "output": "use native cats for lgbm fs config.toml: Whether to use native categorical handling (CPU only) for LightGBM when doing feature selection by permutation: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting use_native_cats_for_lgbm_fs", + "output": "use 
native cats for lgbm fs config.toml: Whether to use native categorical handling (CPU only) for LightGBM when doing feature selection by permutation: " + }, + { + "prompt_type": "plain", + "instruction": ": What does orig_stddev_max_cols do? : orig stddev max cols config.toml: Maximum number of original columns up to which will compute standard deviation of original feature importance. Can be expensive if many features.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain orig_stddev_max_cols. : orig stddev max cols config.toml: Maximum number of original columns up to which will compute standard deviation of original feature importance. Can be expensive if many features.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "orig_stddev_max_cols", + "output": "orig stddev max cols config.toml: Maximum number of original columns up to which will compute standard deviation of original feature importance. Can be expensive if many features.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "orig_stddev_max_cols", + "output": "orig stddev max cols config.toml: Maximum number of original columns up to which will compute standard deviation of original feature importance. Can be expensive if many features.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "orig stddev max cols", + "output": "orig stddev max cols config.toml: Maximum number of original columns up to which will compute standard deviation of original feature importance. Can be expensive if many features.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of original columns up to which will compute standard deviation of original feature importance. 
Can be expensive if many features.: ", + "output": "orig stddev max cols config.toml: Maximum number of original columns up to which will compute standard deviation of original feature importance. Can be expensive if many features.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting orig_stddev_max_cols", + "output": "orig stddev max cols config.toml: Maximum number of original columns up to which will compute standard deviation of original feature importance. Can be expensive if many features.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting orig_stddev_max_cols", + "output": "orig stddev max cols config.toml: Maximum number of original columns up to which will compute standard deviation of original feature importance. Can be expensive if many features.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_relative_cardinality do? : max relative cardinality config.toml: Maximum allowed fraction of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_relative_cardinality. : max relative cardinality config.toml: Maximum allowed fraction of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. allowed fraction of uniques for integer and categorical cols: . 
: Set the max relative cardinality config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_relative_cardinality", + "output": "max relative cardinality config.toml: Maximum allowed fraction of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_relative_cardinality", + "output": "max relative cardinality config.toml: Max. allowed fraction of uniques for integer and categorical cols: Maximum allowed fraction of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max relative cardinality", + "output": "max relative cardinality config.toml: Max. allowed fraction of uniques for integer and categorical cols: Maximum allowed fraction of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. allowed fraction of uniques for integer and categorical cols: ", + "output": "max relative cardinality config.toml: Max. 
allowed fraction of uniques for integer and categorical cols: Maximum allowed fraction of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_relative_cardinality", + "output": "max relative cardinality config.toml: Maximum allowed fraction of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_relative_cardinality", + "output": "max relative cardinality config.toml: Max. allowed fraction of uniques for integer and categorical cols: Maximum allowed fraction of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_absolute_cardinality do? : max absolute cardinality config.toml: Maximum allowed number of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_absolute_cardinality. 
: max absolute cardinality config.toml: Maximum allowed number of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_absolute_cardinality", + "output": "max absolute cardinality config.toml: Maximum allowed number of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_absolute_cardinality", + "output": "max absolute cardinality config.toml: Maximum allowed number of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max absolute cardinality", + "output": "max absolute cardinality config.toml: Maximum allowed number of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max absolute cardinality config.toml: Maximum allowed number of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_absolute_cardinality", + "output": "max absolute cardinality config.toml: Maximum allowed number of unique values for integer and categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_absolute_cardinality", + "output": "max absolute cardinality config.toml: Maximum allowed number of unique values for integer and 
categorical columns (otherwise will treat column as ID and drop)" + }, + { + "prompt_type": "plain", + "instruction": ": What does num_as_cat do? : num as cat config.toml: Whether to treat some numerical features as categorical.For instance, sometimes an integer column may not represent a numerical feature butrepresent different numerical codes instead.Very restrictive to disable, since then even columns with few categorical levels that happen to be numericalin value will not be encoded like a categorical. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_as_cat. : num as cat config.toml: Whether to treat some numerical features as categorical.For instance, sometimes an integer column may not represent a numerical feature butrepresent different numerical codes instead.Very restrictive to disable, since then even columns with few categorical levels that happen to be numericalin value will not be encoded like a categorical. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Allow treating numerical as categorical: . : Set the num as cat config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_as_cat", + "output": "num as cat config.toml: Whether to treat some numerical features as categorical.For instance, sometimes an integer column may not represent a numerical feature butrepresent different numerical codes instead.Very restrictive to disable, since then even columns with few categorical levels that happen to be numericalin value will not be encoded like a categorical. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_as_cat", + "output": "num as cat config.toml: Allow treating numerical as categorical: Whether to treat some numerical features as categorical.For instance, sometimes an integer column may not represent a numerical feature butrepresent different numerical codes instead.Very restrictive to disable, since then even columns with few categorical levels that happen to be numericalin value will not be encoded like a categorical. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num as cat", + "output": "num as cat config.toml: Allow treating numerical as categorical: Whether to treat some numerical features as categorical.For instance, sometimes an integer column may not represent a numerical feature butrepresent different numerical codes instead.Very restrictive to disable, since then even columns with few categorical levels that happen to be numericalin value will not be encoded like a categorical. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Allow treating numerical as categorical: ", + "output": "num as cat config.toml: Allow treating numerical as categorical: Whether to treat some numerical features as categorical.For instance, sometimes an integer column may not represent a numerical feature butrepresent different numerical codes instead.Very restrictive to disable, since then even columns with few categorical levels that happen to be numericalin value will not be encoded like a categorical. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_as_cat", + "output": "num as cat config.toml: Whether to treat some numerical features as categorical.For instance, sometimes an integer column may not represent a numerical feature butrepresent different numerical codes instead.Very restrictive to disable, since then even columns with few categorical levels that happen to be numericalin value will not be encoded like a categorical. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_as_cat", + "output": "num as cat config.toml: Allow treating numerical as categorical: Whether to treat some numerical features as categorical.For instance, sometimes an integer column may not represent a numerical feature butrepresent different numerical codes instead.Very restrictive to disable, since then even columns with few categorical levels that happen to be numericalin value will not be encoded like a categorical. " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_int_as_cat_uniques do? : max int as cat uniques config.toml: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_int_as_cat_uniques. : max int as cat uniques config.toml: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of unique values for int/float to be categoricals: . 
: Set the max int as cat uniques config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_int_as_cat_uniques", + "output": "max int as cat uniques config.toml: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_int_as_cat_uniques", + "output": "max int as cat uniques config.toml: Max. number of unique values for int/float to be categoricals: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max int as cat uniques", + "output": "max int as cat uniques config.toml: Max. number of unique values for int/float to be categoricals: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of unique values for int/float to be categoricals: ", + "output": "max int as cat uniques config.toml: Max. 
number of unique values for int/float to be categoricals: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_int_as_cat_uniques", + "output": "max int as cat uniques config.toml: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_int_as_cat_uniques", + "output": "max int as cat uniques config.toml: Max. number of unique values for int/float to be categoricals: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only)" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_int_as_cat_uniques_if_not_benford do? : max int as cat uniques if not benford config.toml: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only). Applies to integer or real numerical feature that violates Benford's law, and so is ID-like but not entirely an ID." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_int_as_cat_uniques_if_not_benford. : max int as cat uniques if not benford config.toml: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only). Applies to integer or real numerical feature that violates Benford's law, and so is ID-like but not entirely an ID." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of unique values for int/float to be categoricals if violates Benford's Law: . 
: Set the max int as cat uniques if not benford config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_int_as_cat_uniques_if_not_benford", + "output": "max int as cat uniques if not benford config.toml: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only). Applies to integer or real numerical feature that violates Benford's law, and so is ID-like but not entirely an ID." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_int_as_cat_uniques_if_not_benford", + "output": "max int as cat uniques if not benford config.toml: Max. number of unique values for int/float to be categoricals if violates Benford's Law: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only). Applies to integer or real numerical feature that violates Benford's law, and so is ID-like but not entirely an ID." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max int as cat uniques if not benford", + "output": "max int as cat uniques if not benford config.toml: Max. number of unique values for int/float to be categoricals if violates Benford's Law: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only). Applies to integer or real numerical feature that violates Benford's law, and so is ID-like but not entirely an ID." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. 
number of unique values for int/float to be categoricals if violates Benford's Law: ", + "output": "max int as cat uniques if not benford config.toml: Max. number of unique values for int/float to be categoricals if violates Benford's Law: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only). Applies to integer or real numerical feature that violates Benford's law, and so is ID-like but not entirely an ID." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_int_as_cat_uniques_if_not_benford", + "output": "max int as cat uniques if not benford config.toml: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only). Applies to integer or real numerical feature that violates Benford's law, and so is ID-like but not entirely an ID." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_int_as_cat_uniques_if_not_benford", + "output": "max int as cat uniques if not benford config.toml: Max. number of unique values for int/float to be categoricals if violates Benford's Law: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only). Applies to integer or real numerical feature that violates Benford's law, and so is ID-like but not entirely an ID." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_fraction_invalid_numeric do? : max fraction invalid numeric config.toml: When the fraction of non-numeric (and non-missing) values is less or equal than this value, consider thecolumn numeric. Can help with minor data quality issues for experimentation, > 0 is not recommended for production,since type inconsistencies can occur. 
Note: Replaces non-numeric values with missing valuesat start of experiment, so some information is lost, but column is now treated as numeric, which can help.If < 0, then disabled.If == 0, then if number of rows <= max_rows_col_stats, then convert any column of strings of numbers to numeric type. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_fraction_invalid_numeric. : max fraction invalid numeric config.toml: When the fraction of non-numeric (and non-missing) values is less or equal than this value, consider thecolumn numeric. Can help with minor data quality issues for experimentation, > 0 is not recommended for production,since type inconsistencies can occur. Note: Replaces non-numeric values with missing valuesat start of experiment, so some information is lost, but column is now treated as numeric, which can help.If < 0, then disabled.If == 0, then if number of rows <= max_rows_col_stats, then convert any column of strings of numbers to numeric type. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. fraction of numeric values to be non-numeric (and not missing) for a column to still be considered numeric: . : Set the max fraction invalid numeric config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_fraction_invalid_numeric", + "output": "max fraction invalid numeric config.toml: When the fraction of non-numeric (and non-missing) values is less or equal than this value, consider thecolumn numeric. Can help with minor data quality issues for experimentation, > 0 is not recommended for production,since type inconsistencies can occur. 
Note: Replaces non-numeric values with missing valuesat start of experiment, so some information is lost, but column is now treated as numeric, which can help.If < 0, then disabled.If == 0, then if number of rows <= max_rows_col_stats, then convert any column of strings of numbers to numeric type. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_fraction_invalid_numeric", + "output": "max fraction invalid numeric config.toml: Max. fraction of numeric values to be non-numeric (and not missing) for a column to still be considered numeric: When the fraction of non-numeric (and non-missing) values is less or equal than this value, consider thecolumn numeric. Can help with minor data quality issues for experimentation, > 0 is not recommended for production,since type inconsistencies can occur. Note: Replaces non-numeric values with missing valuesat start of experiment, so some information is lost, but column is now treated as numeric, which can help.If < 0, then disabled.If == 0, then if number of rows <= max_rows_col_stats, then convert any column of strings of numbers to numeric type. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max fraction invalid numeric", + "output": "max fraction invalid numeric config.toml: Max. fraction of numeric values to be non-numeric (and not missing) for a column to still be considered numeric: When the fraction of non-numeric (and non-missing) values is less or equal than this value, consider thecolumn numeric. Can help with minor data quality issues for experimentation, > 0 is not recommended for production,since type inconsistencies can occur. 
Note: Replaces non-numeric values with missing valuesat start of experiment, so some information is lost, but column is now treated as numeric, which can help.If < 0, then disabled.If == 0, then if number of rows <= max_rows_col_stats, then convert any column of strings of numbers to numeric type. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. fraction of numeric values to be non-numeric (and not missing) for a column to still be considered numeric: ", + "output": "max fraction invalid numeric config.toml: Max. fraction of numeric values to be non-numeric (and not missing) for a column to still be considered numeric: When the fraction of non-numeric (and non-missing) values is less or equal than this value, consider thecolumn numeric. Can help with minor data quality issues for experimentation, > 0 is not recommended for production,since type inconsistencies can occur. Note: Replaces non-numeric values with missing valuesat start of experiment, so some information is lost, but column is now treated as numeric, which can help.If < 0, then disabled.If == 0, then if number of rows <= max_rows_col_stats, then convert any column of strings of numbers to numeric type. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_fraction_invalid_numeric", + "output": "max fraction invalid numeric config.toml: When the fraction of non-numeric (and non-missing) values is less or equal than this value, consider thecolumn numeric. Can help with minor data quality issues for experimentation, > 0 is not recommended for production,since type inconsistencies can occur. 
Note: Replaces non-numeric values with missing valuesat start of experiment, so some information is lost, but column is now treated as numeric, which can help.If < 0, then disabled.If == 0, then if number of rows <= max_rows_col_stats, then convert any column of strings of numbers to numeric type. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_fraction_invalid_numeric", + "output": "max fraction invalid numeric config.toml: Max. fraction of numeric values to be non-numeric (and not missing) for a column to still be considered numeric: When the fraction of non-numeric (and non-missing) values is less or equal than this value, consider thecolumn numeric. Can help with minor data quality issues for experimentation, > 0 is not recommended for production,since type inconsistencies can occur. Note: Replaces non-numeric values with missing valuesat start of experiment, so some information is lost, but column is now treated as numeric, which can help.If < 0, then disabled.If == 0, then if number of rows <= max_rows_col_stats, then convert any column of strings of numbers to numeric type. " + }, + { + "prompt_type": "plain", + "instruction": ": What does num_folds do? : num folds config.toml: Number of folds for models used during the feature engineering process.Increasing this will put a lower fraction of data into validation and more into training(e.g., num_folds=3 means 67%/33% training/validation splits).Actual value will vary for small or big data cases. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_folds. : num folds config.toml: Number of folds for models used during the feature engineering process.Increasing this will put a lower fraction of data into validation and more into training(e.g., num_folds=3 means 67%/33% training/validation splits).Actual value will vary for small or big data cases. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_folds", + "output": "num folds config.toml: Number of folds for models used during the feature engineering process.Increasing this will put a lower fraction of data into validation and more into training(e.g., num_folds=3 means 67%/33% training/validation splits).Actual value will vary for small or big data cases. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_folds", + "output": "num folds config.toml: Number of folds for models used during the feature engineering process.Increasing this will put a lower fraction of data into validation and more into training(e.g., num_folds=3 means 67%/33% training/validation splits).Actual value will vary for small or big data cases. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num folds", + "output": "num folds config.toml: Number of folds for models used during the feature engineering process.Increasing this will put a lower fraction of data into validation and more into training(e.g., num_folds=3 means 67%/33% training/validation splits).Actual value will vary for small or big data cases. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "num folds config.toml: Number of folds for models used during the feature engineering process.Increasing this will put a lower fraction of data into validation and more into training(e.g., num_folds=3 means 67%/33% training/validation splits).Actual value will vary for small or big data cases. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_folds", + "output": "num folds config.toml: Number of folds for models used during the feature engineering process.Increasing this will put a lower fraction of data into validation and more into training(e.g., num_folds=3 means 67%/33% training/validation splits).Actual value will vary for small or big data cases. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_folds", + "output": "num folds config.toml: Number of folds for models used during the feature engineering process.Increasing this will put a lower fraction of data into validation and more into training(e.g., num_folds=3 means 67%/33% training/validation splits).Actual value will vary for small or big data cases. " + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_different_classes_across_fold_splits do? : allow different classes across fold splits config.toml: For multiclass problems only. Whether to allow different sets of target classes across (cross-)validationfold splits. Especially important when passing a fold column that isn't balanced w.r.t class distribution. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_different_classes_across_fold_splits. : allow different classes across fold splits config.toml: For multiclass problems only. Whether to allow different sets of target classes across (cross-)validationfold splits. Especially important when passing a fold column that isn't balanced w.r.t class distribution. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Allow different sets of classes across all train/validation fold splits: . 
: Set the allow different classes across fold splits config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_different_classes_across_fold_splits", + "output": "allow different classes across fold splits config.toml: For multiclass problems only. Whether to allow different sets of target classes across (cross-)validationfold splits. Especially important when passing a fold column that isn't balanced w.r.t class distribution. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_different_classes_across_fold_splits", + "output": "allow different classes across fold splits config.toml: Allow different sets of classes across all train/validation fold splits: For multiclass problems only. Whether to allow different sets of target classes across (cross-)validationfold splits. Especially important when passing a fold column that isn't balanced w.r.t class distribution. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow different classes across fold splits", + "output": "allow different classes across fold splits config.toml: Allow different sets of classes across all train/validation fold splits: For multiclass problems only. Whether to allow different sets of target classes across (cross-)validationfold splits. Especially important when passing a fold column that isn't balanced w.r.t class distribution. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Allow different sets of classes across all train/validation fold splits: ", + "output": "allow different classes across fold splits config.toml: Allow different sets of classes across all train/validation fold splits: For multiclass problems only. 
Whether to allow different sets of target classes across (cross-)validationfold splits. Especially important when passing a fold column that isn't balanced w.r.t class distribution. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_different_classes_across_fold_splits", + "output": "allow different classes across fold splits config.toml: For multiclass problems only. Whether to allow different sets of target classes across (cross-)validationfold splits. Especially important when passing a fold column that isn't balanced w.r.t class distribution. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allow_different_classes_across_fold_splits", + "output": "allow different classes across fold splits config.toml: Allow different sets of classes across all train/validation fold splits: For multiclass problems only. Whether to allow different sets of target classes across (cross-)validationfold splits. Especially important when passing a fold column that isn't balanced w.r.t class distribution. " + }, + { + "prompt_type": "plain", + "instruction": ": What does full_cv_accuracy_switch do? : full cv accuracy switch config.toml: Accuracy setting equal and above which enables full cross-validation (multiple folds) during feature evolutionas opposed to only a single holdout split (e.g. 2/3 train and 1/3 validation holdout) " + }, + { + "prompt_type": "plain", + "instruction": ": Explain full_cv_accuracy_switch. : full cv accuracy switch config.toml: Accuracy setting equal and above which enables full cross-validation (multiple folds) during feature evolutionas opposed to only a single holdout split (e.g. 
2/3 train and 1/3 validation holdout) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "full_cv_accuracy_switch", + "output": "full cv accuracy switch config.toml: Accuracy setting equal and above which enables full cross-validation (multiple folds) during feature evolutionas opposed to only a single holdout split (e.g. 2/3 train and 1/3 validation holdout) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "full_cv_accuracy_switch", + "output": "full cv accuracy switch config.toml: Accuracy setting equal and above which enables full cross-validation (multiple folds) during feature evolutionas opposed to only a single holdout split (e.g. 2/3 train and 1/3 validation holdout) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "full cv accuracy switch", + "output": "full cv accuracy switch config.toml: Accuracy setting equal and above which enables full cross-validation (multiple folds) during feature evolutionas opposed to only a single holdout split (e.g. 2/3 train and 1/3 validation holdout) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "full cv accuracy switch config.toml: Accuracy setting equal and above which enables full cross-validation (multiple folds) during feature evolutionas opposed to only a single holdout split (e.g. 2/3 train and 1/3 validation holdout) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting full_cv_accuracy_switch", + "output": "full cv accuracy switch config.toml: Accuracy setting equal and above which enables full cross-validation (multiple folds) during feature evolutionas opposed to only a single holdout split (e.g. 
2/3 train and 1/3 validation holdout) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting full_cv_accuracy_switch", + "output": "full cv accuracy switch config.toml: Accuracy setting equal and above which enables full cross-validation (multiple folds) during feature evolutionas opposed to only a single holdout split (e.g. 2/3 train and 1/3 validation holdout) " + }, + { + "prompt_type": "plain", + "instruction": ": What does ensemble_accuracy_switch do? : ensemble accuracy switch config.toml: Accuracy setting equal and above which enables stacked ensemble as final model.Stacking commences at the end of the feature evolution process..It quite often leads to better model performance, but it does increase the complexityand execution time of the final model. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain ensemble_accuracy_switch. : ensemble accuracy switch config.toml: Accuracy setting equal and above which enables stacked ensemble as final model.Stacking commences at the end of the feature evolution process..It quite often leads to better model performance, but it does increase the complexityand execution time of the final model. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ensemble_accuracy_switch", + "output": "ensemble accuracy switch config.toml: Accuracy setting equal and above which enables stacked ensemble as final model.Stacking commences at the end of the feature evolution process..It quite often leads to better model performance, but it does increase the complexityand execution time of the final model. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ensemble_accuracy_switch", + "output": "ensemble accuracy switch config.toml: Accuracy setting equal and above which enables stacked ensemble as final model.Stacking commences at the end of the feature evolution process..It quite often leads to better model performance, but it does increase the complexityand execution time of the final model. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ensemble accuracy switch", + "output": "ensemble accuracy switch config.toml: Accuracy setting equal and above which enables stacked ensemble as final model.Stacking commences at the end of the feature evolution process..It quite often leads to better model performance, but it does increase the complexityand execution time of the final model. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ensemble accuracy switch config.toml: Accuracy setting equal and above which enables stacked ensemble as final model.Stacking commences at the end of the feature evolution process..It quite often leads to better model performance, but it does increase the complexityand execution time of the final model. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ensemble_accuracy_switch", + "output": "ensemble accuracy switch config.toml: Accuracy setting equal and above which enables stacked ensemble as final model.Stacking commences at the end of the feature evolution process..It quite often leads to better model performance, but it does increase the complexityand execution time of the final model. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ensemble_accuracy_switch", + "output": "ensemble accuracy switch config.toml: Accuracy setting equal and above which enables stacked ensemble as final model.Stacking commences at the end of the feature evolution process..It quite often leads to better model performance, but it does increase the complexityand execution time of the final model. " + }, + { + "prompt_type": "plain", + "instruction": ": What does num_ensemble_folds do? : num ensemble folds config.toml: Number of fold splits to use for ensemble_level >= 2.The ensemble modelling may require predictions to be made on out-of-fold sampleshence the data needs to be split on different folds to generate these predictions.Less folds (like 2 or 3) normally create more stable models, but may be less accurateMore folds can get to higher accuracy at the expense of more time, but the performancemay be less stable when the training data is not enough (i.e. higher chance of overfitting).Actual value will vary for small or big data cases. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_ensemble_folds. : num ensemble folds config.toml: Number of fold splits to use for ensemble_level >= 2.The ensemble modelling may require predictions to be made on out-of-fold sampleshence the data needs to be split on different folds to generate these predictions.Less folds (like 2 or 3) normally create more stable models, but may be less accurateMore folds can get to higher accuracy at the expense of more time, but the performancemay be less stable when the training data is not enough (i.e. higher chance of overfitting).Actual value will vary for small or big data cases. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_ensemble_folds", + "output": "num ensemble folds config.toml: Number of fold splits to use for ensemble_level >= 2.The ensemble modelling may require predictions to be made on out-of-fold sampleshence the data needs to be split on different folds to generate these predictions.Less folds (like 2 or 3) normally create more stable models, but may be less accurateMore folds can get to higher accuracy at the expense of more time, but the performancemay be less stable when the training data is not enough (i.e. higher chance of overfitting).Actual value will vary for small or big data cases. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_ensemble_folds", + "output": "num ensemble folds config.toml: Number of fold splits to use for ensemble_level >= 2.The ensemble modelling may require predictions to be made on out-of-fold sampleshence the data needs to be split on different folds to generate these predictions.Less folds (like 2 or 3) normally create more stable models, but may be less accurateMore folds can get to higher accuracy at the expense of more time, but the performancemay be less stable when the training data is not enough (i.e. higher chance of overfitting).Actual value will vary for small or big data cases. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num ensemble folds", + "output": "num ensemble folds config.toml: Number of fold splits to use for ensemble_level >= 2.The ensemble modelling may require predictions to be made on out-of-fold sampleshence the data needs to be split on different folds to generate these predictions.Less folds (like 2 or 3) normally create more stable models, but may be less accurateMore folds can get to higher accuracy at the expense of more time, but the performancemay be less stable when the training data is not enough (i.e. higher chance of overfitting).Actual value will vary for small or big data cases. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "num ensemble folds config.toml: Number of fold splits to use for ensemble_level >= 2.The ensemble modelling may require predictions to be made on out-of-fold sampleshence the data needs to be split on different folds to generate these predictions.Less folds (like 2 or 3) normally create more stable models, but may be less accurateMore folds can get to higher accuracy at the expense of more time, but the performancemay be less stable when the training data is not enough (i.e. higher chance of overfitting).Actual value will vary for small or big data cases. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_ensemble_folds", + "output": "num ensemble folds config.toml: Number of fold splits to use for ensemble_level >= 2.The ensemble modelling may require predictions to be made on out-of-fold sampleshence the data needs to be split on different folds to generate these predictions.Less folds (like 2 or 3) normally create more stable models, but may be less accurateMore folds can get to higher accuracy at the expense of more time, but the performancemay be less stable when the training data is not enough (i.e. higher chance of overfitting).Actual value will vary for small or big data cases. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_ensemble_folds", + "output": "num ensemble folds config.toml: Number of fold splits to use for ensemble_level >= 2.The ensemble modelling may require predictions to be made on out-of-fold sampleshence the data needs to be split on different folds to generate these predictions.Less folds (like 2 or 3) normally create more stable models, but may be less accurateMore folds can get to higher accuracy at the expense of more time, but the performancemay be less stable when the training data is not enough (i.e. higher chance of overfitting).Actual value will vary for small or big data cases. " + }, + { + "prompt_type": "plain", + "instruction": ": What does save_validation_splits do? : save validation splits config.toml: Includes pickles of (train_idx, valid_idx) tuples (numpy row indices for original training data)for all internal validation folds in the experiment summary zip. For debugging. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain save_validation_splits. 
: save validation splits config.toml: Includes pickles of (train_idx, valid_idx) tuples (numpy row indices for original training data)for all internal validation folds in the experiment summary zip. For debugging. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Store internal validation split row indices: . : Set the save validation splits config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "save_validation_splits", + "output": "save validation splits config.toml: Includes pickles of (train_idx, valid_idx) tuples (numpy row indices for original training data)for all internal validation folds in the experiment summary zip. For debugging. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "save_validation_splits", + "output": "save validation splits config.toml: Store internal validation split row indices: Includes pickles of (train_idx, valid_idx) tuples (numpy row indices for original training data)for all internal validation folds in the experiment summary zip. For debugging. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "save validation splits", + "output": "save validation splits config.toml: Store internal validation split row indices: Includes pickles of (train_idx, valid_idx) tuples (numpy row indices for original training data)for all internal validation folds in the experiment summary zip. For debugging. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Store internal validation split row indices: ", + "output": "save validation splits config.toml: Store internal validation split row indices: Includes pickles of (train_idx, valid_idx) tuples (numpy row indices for original training data)for all internal validation folds in the experiment summary zip. For debugging. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting save_validation_splits", + "output": "save validation splits config.toml: Includes pickles of (train_idx, valid_idx) tuples (numpy row indices for original training data)for all internal validation folds in the experiment summary zip. For debugging. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting save_validation_splits", + "output": "save validation splits config.toml: Store internal validation split row indices: Includes pickles of (train_idx, valid_idx) tuples (numpy row indices for original training data)for all internal validation folds in the experiment summary zip. For debugging. " + }, + { + "prompt_type": "plain", + "instruction": ": What does fold_reps do? : fold reps config.toml: Number of repeats for each fold for all validation(modified slightly for small or big data cases) " + }, + { + "prompt_type": "plain", + "instruction": ": Explain fold_reps. 
: fold reps config.toml: Number of repeats for each fold for all validation(modified slightly for small or big data cases) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fold_reps", + "output": "fold reps config.toml: Number of repeats for each fold for all validation(modified slightly for small or big data cases) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fold_reps", + "output": "fold reps config.toml: Number of repeats for each fold for all validation(modified slightly for small or big data cases) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fold reps", + "output": "fold reps config.toml: Number of repeats for each fold for all validation(modified slightly for small or big data cases) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "fold reps config.toml: Number of repeats for each fold for all validation(modified slightly for small or big data cases) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fold_reps", + "output": "fold reps config.toml: Number of repeats for each fold for all validation(modified slightly for small or big data cases) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fold_reps", + "output": "fold reps config.toml: Number of repeats for each fold for all validation(modified slightly for small or big data cases) " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_num_classes do? 
: max num classes config.toml: Maximum number of classes to allow for a classification problem.High number of classes may make certain processes of Driverless AI time-consuming.Memory requirements also increase with higher number of classes " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_num_classes. : max num classes config.toml: Maximum number of classes to allow for a classification problem.High number of classes may make certain processes of Driverless AI time-consuming.Memory requirements also increase with higher number of classes " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of classes for classification problems: . : Set the max num classes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_num_classes", + "output": "max num classes config.toml: Maximum number of classes to allow for a classification problem.High number of classes may make certain processes of Driverless AI time-consuming.Memory requirements also increase with higher number of classes " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_num_classes", + "output": "max num classes config.toml: Max. number of classes for classification problems: Maximum number of classes to allow for a classification problem.High number of classes may make certain processes of Driverless AI time-consuming.Memory requirements also increase with higher number of classes " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max num classes", + "output": "max num classes config.toml: Max. 
number of classes for classification problems: Maximum number of classes to allow for a classification problem.High number of classes may make certain processes of Driverless AI time-consuming.Memory requirements also increase with higher number of classes " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of classes for classification problems: ", + "output": "max num classes config.toml: Max. number of classes for classification problems: Maximum number of classes to allow for a classification problem.High number of classes may make certain processes of Driverless AI time-consuming.Memory requirements also increase with higher number of classes " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_num_classes", + "output": "max num classes config.toml: Maximum number of classes to allow for a classification problem.High number of classes may make certain processes of Driverless AI time-consuming.Memory requirements also increase with higher number of classes " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_num_classes", + "output": "max num classes config.toml: Max. number of classes for classification problems: Maximum number of classes to allow for a classification problem.High number of classes may make certain processes of Driverless AI time-consuming.Memory requirements also increase with higher number of classes " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_num_classes_compute_roc do? : max num classes compute roc config.toml: Maximum number of classes to compute ROC and CM for,beyond which roc_reduce_type choice for reduction is applied.Too many classes can take much longer than model building time. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_num_classes_compute_roc. 
: max num classes compute roc config.toml: Maximum number of classes to compute ROC and CM for,beyond which roc_reduce_type choice for reduction is applied.Too many classes can take much longer than model building time. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of classes to compute ROC and confusion matrix for classification problems: . : Set the max num classes compute roc config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_num_classes_compute_roc", + "output": "max num classes compute roc config.toml: Maximum number of classes to compute ROC and CM for,beyond which roc_reduce_type choice for reduction is applied.Too many classes can take much longer than model building time. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_num_classes_compute_roc", + "output": "max num classes compute roc config.toml: Max. number of classes to compute ROC and confusion matrix for classification problems: Maximum number of classes to compute ROC and CM for,beyond which roc_reduce_type choice for reduction is applied.Too many classes can take much longer than model building time. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max num classes compute roc", + "output": "max num classes compute roc config.toml: Max. number of classes to compute ROC and confusion matrix for classification problems: Maximum number of classes to compute ROC and CM for,beyond which roc_reduce_type choice for reduction is applied.Too many classes can take much longer than model building time. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. 
number of classes to compute ROC and confusion matrix for classification problems: ", + "output": "max num classes compute roc config.toml: Max. number of classes to compute ROC and confusion matrix for classification problems: Maximum number of classes to compute ROC and CM for,beyond which roc_reduce_type choice for reduction is applied.Too many classes can take much longer than model building time. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_num_classes_compute_roc", + "output": "max num classes compute roc config.toml: Maximum number of classes to compute ROC and CM for,beyond which roc_reduce_type choice for reduction is applied.Too many classes can take much longer than model building time. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_num_classes_compute_roc", + "output": "max num classes compute roc config.toml: Max. number of classes to compute ROC and confusion matrix for classification problems: Maximum number of classes to compute ROC and CM for,beyond which roc_reduce_type choice for reduction is applied.Too many classes can take much longer than model building time. " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_num_classes_client_and_gui do? : max num classes client and gui config.toml: Maximum number of classes to show in GUI for confusion matrix, showing first max_num_classes_client_and_gui labels.Beyond 6 classes the diagnostics launched from GUI are visually truncated.This will only modify client-GUI launched diagnostics if changed in config.toml and server is restarted,while this value can be changed in expert settings to control experiment plots. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_num_classes_client_and_gui. 
: max num classes client and gui config.toml: Maximum number of classes to show in GUI for confusion matrix, showing first max_num_classes_client_and_gui labels.Beyond 6 classes the diagnostics launched from GUI are visually truncated.This will only modify client-GUI launched diagnostics if changed in config.toml and server is restarted,while this value can be changed in expert settings to control experiment plots. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of classes to show in GUI for confusion matrix: . : Set the max num classes client and gui config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_num_classes_client_and_gui", + "output": "max num classes client and gui config.toml: Maximum number of classes to show in GUI for confusion matrix, showing first max_num_classes_client_and_gui labels.Beyond 6 classes the diagnostics launched from GUI are visually truncated.This will only modify client-GUI launched diagnostics if changed in config.toml and server is restarted,while this value can be changed in expert settings to control experiment plots. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_num_classes_client_and_gui", + "output": "max num classes client and gui config.toml: Max. number of classes to show in GUI for confusion matrix: Maximum number of classes to show in GUI for confusion matrix, showing first max_num_classes_client_and_gui labels.Beyond 6 classes the diagnostics launched from GUI are visually truncated.This will only modify client-GUI launched diagnostics if changed in config.toml and server is restarted,while this value can be changed in expert settings to control experiment plots. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max num classes client and gui", + "output": "max num classes client and gui config.toml: Max. number of classes to show in GUI for confusion matrix: Maximum number of classes to show in GUI for confusion matrix, showing first max_num_classes_client_and_gui labels.Beyond 6 classes the diagnostics launched from GUI are visually truncated.This will only modify client-GUI launched diagnostics if changed in config.toml and server is restarted,while this value can be changed in expert settings to control experiment plots. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of classes to show in GUI for confusion matrix: ", + "output": "max num classes client and gui config.toml: Max. number of classes to show in GUI for confusion matrix: Maximum number of classes to show in GUI for confusion matrix, showing first max_num_classes_client_and_gui labels.Beyond 6 classes the diagnostics launched from GUI are visually truncated.This will only modify client-GUI launched diagnostics if changed in config.toml and server is restarted,while this value can be changed in expert settings to control experiment plots. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_num_classes_client_and_gui", + "output": "max num classes client and gui config.toml: Maximum number of classes to show in GUI for confusion matrix, showing first max_num_classes_client_and_gui labels.Beyond 6 classes the diagnostics launched from GUI are visually truncated.This will only modify client-GUI launched diagnostics if changed in config.toml and server is restarted,while this value can be changed in expert settings to control experiment plots. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_num_classes_client_and_gui", + "output": "max num classes client and gui config.toml: Max. number of classes to show in GUI for confusion matrix: Maximum number of classes to show in GUI for confusion matrix, showing first max_num_classes_client_and_gui labels.Beyond 6 classes the diagnostics launched from GUI are visually truncated.This will only modify client-GUI launched diagnostics if changed in config.toml and server is restarted,while this value can be changed in expert settings to control experiment plots. " + }, + { + "prompt_type": "plain", + "instruction": ": What does roc_reduce_type do? : roc reduce type config.toml: If too many classes when computing roc,reduce by \"rows\" by randomly sampling rows,or reduce by truncating classes to no more than max_num_classes_compute_roc.If have sufficient rows for class count, can reduce by rows. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain roc_reduce_type. : roc reduce type config.toml: If too many classes when computing roc,reduce by \"rows\" by randomly sampling rows,or reduce by truncating classes to no more than max_num_classes_compute_roc.If have sufficient rows for class count, can reduce by rows. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: ROC/CM reduction technique for large class counts: . : Set the roc reduce type config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "roc_reduce_type", + "output": "roc reduce type config.toml: If too many classes when computing roc,reduce by \"rows\" by randomly sampling rows,or reduce by truncating classes to no more than max_num_classes_compute_roc.If have sufficient rows for class count, can reduce by rows. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "roc_reduce_type", + "output": "roc reduce type config.toml: ROC/CM reduction technique for large class counts: If too many classes when computing roc,reduce by \"rows\" by randomly sampling rows,or reduce by truncating classes to no more than max_num_classes_compute_roc.If have sufficient rows for class count, can reduce by rows. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "roc reduce type", + "output": "roc reduce type config.toml: ROC/CM reduction technique for large class counts: If too many classes when computing roc,reduce by \"rows\" by randomly sampling rows,or reduce by truncating classes to no more than max_num_classes_compute_roc.If have sufficient rows for class count, can reduce by rows. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ROC/CM reduction technique for large class counts: ", + "output": "roc reduce type config.toml: ROC/CM reduction technique for large class counts: If too many classes when computing roc,reduce by \"rows\" by randomly sampling rows,or reduce by truncating classes to no more than max_num_classes_compute_roc.If have sufficient rows for class count, can reduce by rows. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting roc_reduce_type", + "output": "roc reduce type config.toml: If too many classes when computing roc,reduce by \"rows\" by randomly sampling rows,or reduce by truncating classes to no more than max_num_classes_compute_roc.If have sufficient rows for class count, can reduce by rows. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting roc_reduce_type", + "output": "roc reduce type config.toml: ROC/CM reduction technique for large class counts: If too many classes when computing roc,reduce by \"rows\" by randomly sampling rows,or reduce by truncating classes to no more than max_num_classes_compute_roc.If have sufficient rows for class count, can reduce by rows. " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_rows_cm_ga do? : max rows cm ga config.toml: Maximum number of rows to obtain confusion matrix related plots during feature evolution.Does not limit final model calculation. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_rows_cm_ga. : max rows cm ga config.toml: Maximum number of rows to obtain confusion matrix related plots during feature evolution.Does not limit final model calculation. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum number of rows to obtain confusion matrix related plots during feature evolution: . : Set the max rows cm ga config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_cm_ga", + "output": "max rows cm ga config.toml: Maximum number of rows to obtain confusion matrix related plots during feature evolution.Does not limit final model calculation. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_cm_ga", + "output": "max rows cm ga config.toml: Maximum number of rows to obtain confusion matrix related plots during feature evolution: Maximum number of rows to obtain confusion matrix related plots during feature evolution.Does not limit final model calculation. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max rows cm ga", + "output": "max rows cm ga config.toml: Maximum number of rows to obtain confusion matrix related plots during feature evolution: Maximum number of rows to obtain confusion matrix related plots during feature evolution.Does not limit final model calculation. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of rows to obtain confusion matrix related plots during feature evolution: ", + "output": "max rows cm ga config.toml: Maximum number of rows to obtain confusion matrix related plots during feature evolution: Maximum number of rows to obtain confusion matrix related plots during feature evolution.Does not limit final model calculation. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_rows_cm_ga", + "output": "max rows cm ga config.toml: Maximum number of rows to obtain confusion matrix related plots during feature evolution.Does not limit final model calculation. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_rows_cm_ga", + "output": "max rows cm ga config.toml: Maximum number of rows to obtain confusion matrix related plots during feature evolution: Maximum number of rows to obtain confusion matrix related plots during feature evolution.Does not limit final model calculation. " + }, + { + "prompt_type": "plain", + "instruction": ": What does num_actuals_vs_predicted do? : num actuals vs predicted config.toml: Number of actuals vs. predicted data points to use in order to generate in the relevant plot/graph which is shown at the right part of the screen within an experiment." + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_actuals_vs_predicted. 
: num actuals vs predicted config.toml: Number of actuals vs. predicted data points to use in order to generate in the relevant plot/graph which is shown at the right part of the screen within an experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_actuals_vs_predicted", + "output": "num actuals vs predicted config.toml: Number of actuals vs. predicted data points to use in order to generate in the relevant plot/graph which is shown at the right part of the screen within an experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_actuals_vs_predicted", + "output": "num actuals vs predicted config.toml: Number of actuals vs. predicted data points to use in order to generate in the relevant plot/graph which is shown at the right part of the screen within an experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num actuals vs predicted", + "output": "num actuals vs predicted config.toml: Number of actuals vs. predicted data points to use in order to generate in the relevant plot/graph which is shown at the right part of the screen within an experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "num actuals vs predicted config.toml: Number of actuals vs. predicted data points to use in order to generate in the relevant plot/graph which is shown at the right part of the screen within an experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_actuals_vs_predicted", + "output": "num actuals vs predicted config.toml: Number of actuals vs. 
predicted data points to use in order to generate in the relevant plot/graph which is shown at the right part of the screen within an experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_actuals_vs_predicted", + "output": "num actuals vs predicted config.toml: Number of actuals vs. predicted data points to use in order to generate in the relevant plot/graph which is shown at the right part of the screen within an experiment." + }, + { + "prompt_type": "plain", + "instruction": ": What does use_feature_brain_new_experiments do? : use feature brain new experiments config.toml: Whether to use feature_brain results even if running new experiments. Feature brain can be risky with some types of changes to experiment setup. Even rescoring may be insufficient, so by default this is False. For example, one experiment may have training=external validation by accident, and get high score, and while feature_brain_reset_score='on' means we will rescore, it will have already seen during training the external validation and leak that data as part of what it learned from. If this is False, feature_brain_level just sets possible models to use and logs/notifies, but does not use these feature brain cached models. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain use_feature_brain_new_experiments. : use feature brain new experiments config.toml: Whether to use feature_brain results even if running new experiments. Feature brain can be risky with some types of changes to experiment setup. Even rescoring may be insufficient, so by default this is False. For example, one experiment may have training=external validation by accident, and get high score, and while feature_brain_reset_score='on' means we will rescore, it will have already seen during training the external validation and leak that data as part of what it learned from. 
If this is False, feature_brain_level just sets possible models to use and logs/notifies, but does not use these feature brain cached models. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to use Feature Brain for new experiments.: . : Set the use feature brain new experiments config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_feature_brain_new_experiments", + "output": "use feature brain new experiments config.toml: Whether to use feature_brain results even if running new experiments. Feature brain can be risky with some types of changes to experiment setup. Even rescoring may be insufficient, so by default this is False. For example, one experiment may have training=external validation by accident, and get high score, and while feature_brain_reset_score='on' means we will rescore, it will have already seen during training the external validation and leak that data as part of what it learned from. If this is False, feature_brain_level just sets possible models to use and logs/notifies, but does not use these feature brain cached models. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_feature_brain_new_experiments", + "output": "use feature brain new experiments config.toml: Whether to use Feature Brain for new experiments.: Whether to use feature_brain results even if running new experiments. Feature brain can be risky with some types of changes to experiment setup. Even rescoring may be insufficient, so by default this is False. For example, one experiment may have training=external validation by accident, and get high score, and while feature_brain_reset_score='on' means we will rescore, it will have already seen during training the external validation and leak that data as part of what it learned from. 
If this is False, feature_brain_level just sets possible models to use and logs/notifies, but does not use these feature brain cached models. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use feature brain new experiments", + "output": "use feature brain new experiments config.toml: Whether to use Feature Brain for new experiments.: Whether to use feature_brain results even if running new experiments. Feature brain can be risky with some types of changes to experiment setup. Even rescoring may be insufficient, so by default this is False. For example, one experiment may have training=external validation by accident, and get high score, and while feature_brain_reset_score='on' means we will rescore, it will have already seen during training the external validation and leak that data as part of what it learned from. If this is False, feature_brain_level just sets possible models to use and logs/notifies, but does not use these feature brain cached models. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to use Feature Brain for new experiments.: ", + "output": "use feature brain new experiments config.toml: Whether to use Feature Brain for new experiments.: Whether to use feature_brain results even if running new experiments. Feature brain can be risky with some types of changes to experiment setup. Even rescoring may be insufficient, so by default this is False. For example, one experiment may have training=external validation by accident, and get high score, and while feature_brain_reset_score='on' means we will rescore, it will have already seen during training the external validation and leak that data as part of what it learned from. If this is False, feature_brain_level just sets possible models to use and logs/notifies, but does not use these feature brain cached models. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting use_feature_brain_new_experiments", + "output": "use feature brain new experiments config.toml: Whether to use feature_brain results even if running new experiments. Feature brain can be risky with some types of changes to experiment setup. Even rescoring may be insufficient, so by default this is False. For example, one experiment may have training=external validation by accident, and get high score, and while feature_brain_reset_score='on' means we will rescore, it will have already seen during training the external validation and leak that data as part of what it learned from. If this is False, feature_brain_level just sets possible models to use and logs/notifies, but does not use these feature brain cached models. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting use_feature_brain_new_experiments", + "output": "use feature brain new experiments config.toml: Whether to use Feature Brain for new experiments.: Whether to use feature_brain results even if running new experiments. Feature brain can be risky with some types of changes to experiment setup. Even rescoring may be insufficient, so by default this is False. For example, one experiment may have training=external validation by accident, and get high score, and while feature_brain_reset_score='on' means we will rescore, it will have already seen during training the external validation and leak that data as part of what it learned from. If this is False, feature_brain_level just sets possible models to use and logs/notifies, but does not use these feature brain cached models. " + }, + { + "prompt_type": "plain", + "instruction": ": What does resume_data_schema do? 
: resume data schema config.toml: Whether to reuse dataset schema, such as data types set in UI for each column, from parent experiment ('on') or to ignore original dataset schema and only use new schema ('off'). resume_data_schema=True is a basic form of data lineage, but it may not be desirable if data column names changed to incompatible data types like int to string. 'auto': for restart, retrain final pipeline, or refit best models, default is to resume data schema, but new experiments would not by default reuse old schema. 'on': force reuse of data schema from parent experiment if possible. 'off': don't reuse data schema under any case. The reuse of the column schema can also be disabled by: in UI: selecting Parent Experiment as None; in client: setting resume_experiment_id to None" + }, + { + "prompt_type": "plain", + "instruction": ": Explain resume_data_schema. : resume data schema config.toml: Whether to reuse dataset schema, such as data types set in UI for each column, from parent experiment ('on') or to ignore original dataset schema and only use new schema ('off'). resume_data_schema=True is a basic form of data lineage, but it may not be desirable if data column names changed to incompatible data types like int to string. 'auto': for restart, retrain final pipeline, or refit best models, default is to resume data schema, but new experiments would not by default reuse old schema. 'on': force reuse of data schema from parent experiment if possible. 'off': don't reuse data schema under any case. The reuse of the column schema can also be disabled by: in UI: selecting Parent Experiment as None; in client: setting resume_experiment_id to None" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to reuse dataset schema.: . 
: Set the resume data schema config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "resume_data_schema", + "output": "resume data schema config.toml: Whether to reuse dataset schema, such as data types set in UI for each column, from parent experiment ('on') or to ignore original dataset schema and only use new schema ('off'). resume_data_schema=True is a basic form of data lineage, but it may not be desirable if data column names changed to incompatible data types like int to string. 'auto': for restart, retrain final pipeline, or refit best models, default is to resume data schema, but new experiments would not by default reuse old schema. 'on': force reuse of data schema from parent experiment if possible. 'off': don't reuse data schema under any case. The reuse of the column schema can also be disabled by: in UI: selecting Parent Experiment as None; in client: setting resume_experiment_id to None" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "resume_data_schema", + "output": "resume data schema config.toml: Whether to reuse dataset schema.: Whether to reuse dataset schema, such as data types set in UI for each column, from parent experiment ('on') or to ignore original dataset schema and only use new schema ('off'). resume_data_schema=True is a basic form of data lineage, but it may not be desirable if data column names changed to incompatible data types like int to string. 'auto': for restart, retrain final pipeline, or refit best models, default is to resume data schema, but new experiments would not by default reuse old schema. 'on': force reuse of data schema from parent experiment if possible. 'off': don't reuse data schema under any case. The reuse of the column schema can also be disabled by: in UI: selecting Parent Experiment as None; in client: setting resume_experiment_id to None" + }, + { + "prompt_type": 
"human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "resume data schema", + "output": "resume data schema config.toml: Whether to reuse dataset schema.: Whether reuse dataset schema, such as data types set in UI for each column, from parent experiment ('on') or to ignore original dataset schema and only use new schema ('off').resume_data_schema=True is a basic form of data lineage, but it may not be desirable if data colunn names changed to incompatible data types like int to string.'auto': for restart, retrain final pipeline, or refit best models, default is to resume data schema, but new experiments would not by default reuse old schema.'on': force reuse of data schema from parent experiment if possible'off': don't reuse data schema under any case.The reuse of the column schema can also be disabled by:in UI: selecting Parent Experiment as Nonein client: setting resume_experiment_id to None" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to reuse dataset schema.: ", + "output": "resume data schema config.toml: Whether to reuse dataset schema.: Whether reuse dataset schema, such as data types set in UI for each column, from parent experiment ('on') or to ignore original dataset schema and only use new schema ('off').resume_data_schema=True is a basic form of data lineage, but it may not be desirable if data colunn names changed to incompatible data types like int to string.'auto': for restart, retrain final pipeline, or refit best models, default is to resume data schema, but new experiments would not by default reuse old schema.'on': force reuse of data schema from parent experiment if possible'off': don't reuse data schema under any case.The reuse of the column schema can also be disabled by:in UI: selecting Parent Experiment as Nonein client: setting resume_experiment_id to None" + }, + { + "prompt_type": "human_bot", + 
"instruction": "Provide a short explanation of the expert setting resume_data_schema", + "output": "resume data schema config.toml: Whether reuse dataset schema, such as data types set in UI for each column, from parent experiment ('on') or to ignore original dataset schema and only use new schema ('off').resume_data_schema=True is a basic form of data lineage, but it may not be desirable if data colunn names changed to incompatible data types like int to string.'auto': for restart, retrain final pipeline, or refit best models, default is to resume data schema, but new experiments would not by default reuse old schema.'on': force reuse of data schema from parent experiment if possible'off': don't reuse data schema under any case.The reuse of the column schema can also be disabled by:in UI: selecting Parent Experiment as Nonein client: setting resume_experiment_id to None" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting resume_data_schema", + "output": "resume data schema config.toml: Whether to reuse dataset schema.: Whether reuse dataset schema, such as data types set in UI for each column, from parent experiment ('on') or to ignore original dataset schema and only use new schema ('off').resume_data_schema=True is a basic form of data lineage, but it may not be desirable if data colunn names changed to incompatible data types like int to string.'auto': for restart, retrain final pipeline, or refit best models, default is to resume data schema, but new experiments would not by default reuse old schema.'on': force reuse of data schema from parent experiment if possible'off': don't reuse data schema under any case.The reuse of the column schema can also be disabled by:in UI: selecting Parent Experiment as Nonein client: setting resume_experiment_id to None" + }, + { + "prompt_type": "plain", + "instruction": ": What does feature_brain_level do? 
: feature brain level config.toml: Whether to show (or use) results from H2O.ai brain: the local caching and smart re-use of prior experiments,in order to generate more useful features and models for new experiments.See use_feature_brain_new_experiments for how new experiments by default do not use brain cache.It can also be used to control checkpointing for experiments that have been paused or interrupted.DAI will use H2O.ai brain cache if cache file hasa) any matching column names and types for a similar experiment typeb) exactly matches classesc) exactly matches class labelsd) matches basic time series choicese) interpretability of cache is equal or lowerf) main model (booster) is allowed by new experiment.Level of brain to use (for chosen level, where higher levels will also do all lower level operations automatically)-1 = Don't use any brain cache and don't write any cache0 = Don't use any brain cache but still write cache Use case: Want to save model for later use, but want current model to be built without any brain models1 = smart checkpoint from latest best individual model Use case: Want to use latest matching model, but match can be loose, so needs caution2 = smart checkpoint from H2O.ai brain cache of individual best models Use case: DAI scans through H2O.ai brain cache for best models to restart from3 = smart checkpoint like level #1, but for entire population. Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)4 = smart checkpoint like level #2, but for entire population. 
Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)5 = like #4, but will scan over entire brain cache of populations to get best scored individuals (can be slower due to brain cache scanning if big cache)1000 + feature_brain_level (above positive values) = use resumed_experiment_id and actual feature_brain_level, to use other specific experiment as base for individuals or population, instead of sampling from any old experimentsGUI has 3 options and corresponding settings:1) New Experiment: Uses feature brain level default of 22) New Experiment With Same Settings: Re-uses the same feature brain level as parent experiment3) Restart From Last Checkpoint: Resets feature brain level to 1003 and sets experiment ID to resume from (continued genetic algorithm iterations)4) Retrain Final Pipeline: Like Restart but also time=0 so skips any tuning and heads straight to final model (assumes had at least one tuning iteration in parent experiment)Other use cases:a) Restart on different data: Use same column names and fewer or more rows (applicable to 1 - 5)b) Re-fit only final pipeline: Like (a), but choose time=1 and feature_brain_level=3 - 5c) Restart with more columns: Add columns, so model builds upon old model built from old column names (1 - 5)d) Restart with focus on model tuning: Restart, then select feature_engineering_effort = 3 in expert settingse) can retrain final model but ignore any original features except those in final pipeline (normal retrain but set brain_add_features_for_new_columns=false)Notes:1) In all cases, we first check the resumed experiment id if given, and then the brain cache2) For Restart cases, may want to set min_dai_iterations to non-zero to force delayed early stopping, else may not be enough iterations to find better model.3) A \"New experiment with Same Settings\" of a Restart will use feature_brain_level=1003 for default Restart mode 
(revert to 2, or even 0 if want to start a fresh experiment otherwise)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain feature_brain_level. : feature brain level config.toml: Whether to show (or use) results from H2O.ai brain: the local caching and smart re-use of prior experiments,in order to generate more useful features and models for new experiments.See use_feature_brain_new_experiments for how new experiments by default do not use brain cache.It can also be used to control checkpointing for experiments that have been paused or interrupted.DAI will use H2O.ai brain cache if cache file hasa) any matching column names and types for a similar experiment typeb) exactly matches classesc) exactly matches class labelsd) matches basic time series choicese) interpretability of cache is equal or lowerf) main model (booster) is allowed by new experiment.Level of brain to use (for chosen level, where higher levels will also do all lower level operations automatically)-1 = Don't use any brain cache and don't write any cache0 = Don't use any brain cache but still write cache Use case: Want to save model for later use, but want current model to be built without any brain models1 = smart checkpoint from latest best individual model Use case: Want to use latest matching model, but match can be loose, so needs caution2 = smart checkpoint from H2O.ai brain cache of individual best models Use case: DAI scans through H2O.ai brain cache for best models to restart from3 = smart checkpoint like level #1, but for entire population. Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)4 = smart checkpoint like level #2, but for entire population. 
Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)5 = like #4, but will scan over entire brain cache of populations to get best scored individuals (can be slower due to brain cache scanning if big cache)1000 + feature_brain_level (above positive values) = use resumed_experiment_id and actual feature_brain_level, to use other specific experiment as base for individuals or population, instead of sampling from any old experimentsGUI has 3 options and corresponding settings:1) New Experiment: Uses feature brain level default of 22) New Experiment With Same Settings: Re-uses the same feature brain level as parent experiment3) Restart From Last Checkpoint: Resets feature brain level to 1003 and sets experiment ID to resume from (continued genetic algorithm iterations)4) Retrain Final Pipeline: Like Restart but also time=0 so skips any tuning and heads straight to final model (assumes had at least one tuning iteration in parent experiment)Other use cases:a) Restart on different data: Use same column names and fewer or more rows (applicable to 1 - 5)b) Re-fit only final pipeline: Like (a), but choose time=1 and feature_brain_level=3 - 5c) Restart with more columns: Add columns, so model builds upon old model built from old column names (1 - 5)d) Restart with focus on model tuning: Restart, then select feature_engineering_effort = 3 in expert settingse) can retrain final model but ignore any original features except those in final pipeline (normal retrain but set brain_add_features_for_new_columns=false)Notes:1) In all cases, we first check the resumed experiment id if given, and then the brain cache2) For Restart cases, may want to set min_dai_iterations to non-zero to force delayed early stopping, else may not be enough iterations to find better model.3) A \"New experiment with Same Settings\" of a Restart will use feature_brain_level=1003 for default Restart mode 
(revert to 2, or even 0 if want to start a fresh experiment otherwise)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Model/Feature Brain Level (0..10): . : Set the feature brain level config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_brain_level", + "output": "feature brain level config.toml: Whether to show (or use) results from H2O.ai brain: the local caching and smart re-use of prior experiments,in order to generate more useful features and models for new experiments.See use_feature_brain_new_experiments for how new experiments by default do not use brain cache.It can also be used to control checkpointing for experiments that have been paused or interrupted.DAI will use H2O.ai brain cache if cache file hasa) any matching column names and types for a similar experiment typeb) exactly matches classesc) exactly matches class labelsd) matches basic time series choicese) interpretability of cache is equal or lowerf) main model (booster) is allowed by new experiment.Level of brain to use (for chosen level, where higher levels will also do all lower level operations automatically)-1 = Don't use any brain cache and don't write any cache0 = Don't use any brain cache but still write cache Use case: Want to save model for later use, but want current model to be built without any brain models1 = smart checkpoint from latest best individual model Use case: Want to use latest matching model, but match can be loose, so needs caution2 = smart checkpoint from H2O.ai brain cache of individual best models Use case: DAI scans through H2O.ai brain cache for best models to restart from3 = smart checkpoint like level #1, but for entire population. 
Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)4 = smart checkpoint like level #2, but for entire population. Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)5 = like #4, but will scan over entire brain cache of populations to get best scored individuals (can be slower due to brain cache scanning if big cache)1000 + feature_brain_level (above positive values) = use resumed_experiment_id and actual feature_brain_level, to use other specific experiment as base for individuals or population, instead of sampling from any old experimentsGUI has 3 options and corresponding settings:1) New Experiment: Uses feature brain level default of 22) New Experiment With Same Settings: Re-uses the same feature brain level as parent experiment3) Restart From Last Checkpoint: Resets feature brain level to 1003 and sets experiment ID to resume from (continued genetic algorithm iterations)4) Retrain Final Pipeline: Like Restart but also time=0 so skips any tuning and heads straight to final model (assumes had at least one tuning iteration in parent experiment)Other use cases:a) Restart on different data: Use same column names and fewer or more rows (applicable to 1 - 5)b) Re-fit only final pipeline: Like (a), but choose time=1 and feature_brain_level=3 - 5c) Restart with more columns: Add columns, so model builds upon old model built from old column names (1 - 5)d) Restart with focus on model tuning: Restart, then select feature_engineering_effort = 3 in expert settingse) can retrain final model but ignore any original features except those in final pipeline (normal retrain but set brain_add_features_for_new_columns=false)Notes:1) In all cases, we first check the resumed experiment id if given, and then the brain cache2) For Restart cases, may want to set min_dai_iterations 
to non-zero to force delayed early stopping, else may not be enough iterations to find better model.3) A \"New experiment with Same Settings\" of a Restart will use feature_brain_level=1003 for default Restart mode (revert to 2, or even 0 if want to start a fresh experiment otherwise)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_brain_level", + "output": "feature brain level config.toml: Model/Feature Brain Level (0..10): Whether to show (or use) results from H2O.ai brain: the local caching and smart re-use of prior experiments,in order to generate more useful features and models for new experiments.See use_feature_brain_new_experiments for how new experiments by default do not use brain cache.It can also be used to control checkpointing for experiments that have been paused or interrupted.DAI will use H2O.ai brain cache if cache file hasa) any matching column names and types for a similar experiment typeb) exactly matches classesc) exactly matches class labelsd) matches basic time series choicese) interpretability of cache is equal or lowerf) main model (booster) is allowed by new experiment.Level of brain to use (for chosen level, where higher levels will also do all lower level operations automatically)-1 = Don't use any brain cache and don't write any cache0 = Don't use any brain cache but still write cache Use case: Want to save model for later use, but want current model to be built without any brain models1 = smart checkpoint from latest best individual model Use case: Want to use latest matching model, but match can be loose, so needs caution2 = smart checkpoint from H2O.ai brain cache of individual best models Use case: DAI scans through H2O.ai brain cache for best models to restart from3 = smart checkpoint like level #1, but for entire population. 
Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)4 = smart checkpoint like level #2, but for entire population. Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)5 = like #4, but will scan over entire brain cache of populations to get best scored individuals (can be slower due to brain cache scanning if big cache)1000 + feature_brain_level (above positive values) = use resumed_experiment_id and actual feature_brain_level, to use other specific experiment as base for individuals or population, instead of sampling from any old experimentsGUI has 3 options and corresponding settings:1) New Experiment: Uses feature brain level default of 22) New Experiment With Same Settings: Re-uses the same feature brain level as parent experiment3) Restart From Last Checkpoint: Resets feature brain level to 1003 and sets experiment ID to resume from (continued genetic algorithm iterations)4) Retrain Final Pipeline: Like Restart but also time=0 so skips any tuning and heads straight to final model (assumes had at least one tuning iteration in parent experiment)Other use cases:a) Restart on different data: Use same column names and fewer or more rows (applicable to 1 - 5)b) Re-fit only final pipeline: Like (a), but choose time=1 and feature_brain_level=3 - 5c) Restart with more columns: Add columns, so model builds upon old model built from old column names (1 - 5)d) Restart with focus on model tuning: Restart, then select feature_engineering_effort = 3 in expert settingse) can retrain final model but ignore any original features except those in final pipeline (normal retrain but set brain_add_features_for_new_columns=false)Notes:1) In all cases, we first check the resumed experiment id if given, and then the brain cache2) For Restart cases, may want to set min_dai_iterations 
to non-zero to force delayed early stopping, else may not be enough iterations to find better model.3) A \"New experiment with Same Settings\" of a Restart will use feature_brain_level=1003 for default Restart mode (revert to 2, or even 0 if want to start a fresh experiment otherwise)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature brain level", + "output": "feature brain level config.toml: Model/Feature Brain Level (0..10): Whether to show (or use) results from H2O.ai brain: the local caching and smart re-use of prior experiments,in order to generate more useful features and models for new experiments.See use_feature_brain_new_experiments for how new experiments by default do not use brain cache.It can also be used to control checkpointing for experiments that have been paused or interrupted.DAI will use H2O.ai brain cache if cache file hasa) any matching column names and types for a similar experiment typeb) exactly matches classesc) exactly matches class labelsd) matches basic time series choicese) interpretability of cache is equal or lowerf) main model (booster) is allowed by new experiment.Level of brain to use (for chosen level, where higher levels will also do all lower level operations automatically)-1 = Don't use any brain cache and don't write any cache0 = Don't use any brain cache but still write cache Use case: Want to save model for later use, but want current model to be built without any brain models1 = smart checkpoint from latest best individual model Use case: Want to use latest matching model, but match can be loose, so needs caution2 = smart checkpoint from H2O.ai brain cache of individual best models Use case: DAI scans through H2O.ai brain cache for best models to restart from3 = smart checkpoint like level #1, but for entire population. 
Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)4 = smart checkpoint like level #2, but for entire population. Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)5 = like #4, but will scan over entire brain cache of populations to get best scored individuals (can be slower due to brain cache scanning if big cache)1000 + feature_brain_level (above positive values) = use resumed_experiment_id and actual feature_brain_level, to use other specific experiment as base for individuals or population, instead of sampling from any old experimentsGUI has 3 options and corresponding settings:1) New Experiment: Uses feature brain level default of 22) New Experiment With Same Settings: Re-uses the same feature brain level as parent experiment3) Restart From Last Checkpoint: Resets feature brain level to 1003 and sets experiment ID to resume from (continued genetic algorithm iterations)4) Retrain Final Pipeline: Like Restart but also time=0 so skips any tuning and heads straight to final model (assumes had at least one tuning iteration in parent experiment)Other use cases:a) Restart on different data: Use same column names and fewer or more rows (applicable to 1 - 5)b) Re-fit only final pipeline: Like (a), but choose time=1 and feature_brain_level=3 - 5c) Restart with more columns: Add columns, so model builds upon old model built from old column names (1 - 5)d) Restart with focus on model tuning: Restart, then select feature_engineering_effort = 3 in expert settingse) can retrain final model but ignore any original features except those in final pipeline (normal retrain but set brain_add_features_for_new_columns=false)Notes:1) In all cases, we first check the resumed experiment id if given, and then the brain cache2) For Restart cases, may want to set min_dai_iterations 
to non-zero to force delayed early stopping, else may not be enough iterations to find better model.3) A \"New experiment with Same Settings\" of a Restart will use feature_brain_level=1003 for default Restart mode (revert to 2, or even 0 if want to start a fresh experiment otherwise)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Model/Feature Brain Level (0..10): ", + "output": "feature brain level config.toml: Model/Feature Brain Level (0..10): Whether to show (or use) results from H2O.ai brain: the local caching and smart re-use of prior experiments,in order to generate more useful features and models for new experiments.See use_feature_brain_new_experiments for how new experiments by default do not use brain cache.It can also be used to control checkpointing for experiments that have been paused or interrupted.DAI will use H2O.ai brain cache if cache file hasa) any matching column names and types for a similar experiment typeb) exactly matches classesc) exactly matches class labelsd) matches basic time series choicese) interpretability of cache is equal or lowerf) main model (booster) is allowed by new experiment.Level of brain to use (for chosen level, where higher levels will also do all lower level operations automatically)-1 = Don't use any brain cache and don't write any cache0 = Don't use any brain cache but still write cache Use case: Want to save model for later use, but want current model to be built without any brain models1 = smart checkpoint from latest best individual model Use case: Want to use latest matching model, but match can be loose, so needs caution2 = smart checkpoint from H2O.ai brain cache of individual best models Use case: DAI scans through H2O.ai brain cache for best models to restart from3 = smart checkpoint like level #1, but for entire population. 
Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)4 = smart checkpoint like level #2, but for entire population. Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)5 = like #4, but will scan over entire brain cache of populations to get best scored individuals (can be slower due to brain cache scanning if big cache)1000 + feature_brain_level (above positive values) = use resumed_experiment_id and actual feature_brain_level, to use other specific experiment as base for individuals or population, instead of sampling from any old experimentsGUI has 3 options and corresponding settings:1) New Experiment: Uses feature brain level default of 22) New Experiment With Same Settings: Re-uses the same feature brain level as parent experiment3) Restart From Last Checkpoint: Resets feature brain level to 1003 and sets experiment ID to resume from (continued genetic algorithm iterations)4) Retrain Final Pipeline: Like Restart but also time=0 so skips any tuning and heads straight to final model (assumes had at least one tuning iteration in parent experiment)Other use cases:a) Restart on different data: Use same column names and fewer or more rows (applicable to 1 - 5)b) Re-fit only final pipeline: Like (a), but choose time=1 and feature_brain_level=3 - 5c) Restart with more columns: Add columns, so model builds upon old model built from old column names (1 - 5)d) Restart with focus on model tuning: Restart, then select feature_engineering_effort = 3 in expert settingse) can retrain final model but ignore any original features except those in final pipeline (normal retrain but set brain_add_features_for_new_columns=false)Notes:1) In all cases, we first check the resumed experiment id if given, and then the brain cache2) For Restart cases, may want to set min_dai_iterations 
to non-zero to force delayed early stopping, else may not be enough iterations to find better model.3) A \"New experiment with Same Settings\" of a Restart will use feature_brain_level=1003 for default Restart mode (revert to 2, or even 0 if want to start a fresh experiment otherwise)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting feature_brain_level", + "output": "feature brain level config.toml: Whether to show (or use) results from H2O.ai brain: the local caching and smart re-use of prior experiments,in order to generate more useful features and models for new experiments.See use_feature_brain_new_experiments for how new experiments by default do not use brain cache.It can also be used to control checkpointing for experiments that have been paused or interrupted.DAI will use H2O.ai brain cache if cache file hasa) any matching column names and types for a similar experiment typeb) exactly matches classesc) exactly matches class labelsd) matches basic time series choicese) interpretability of cache is equal or lowerf) main model (booster) is allowed by new experiment.Level of brain to use (for chosen level, where higher levels will also do all lower level operations automatically)-1 = Don't use any brain cache and don't write any cache0 = Don't use any brain cache but still write cache Use case: Want to save model for later use, but want current model to be built without any brain models1 = smart checkpoint from latest best individual model Use case: Want to use latest matching model, but match can be loose, so needs caution2 = smart checkpoint from H2O.ai brain cache of individual best models Use case: DAI scans through H2O.ai brain cache for best models to restart from3 = smart checkpoint like level #1, but for entire population. 
Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)4 = smart checkpoint like level #2, but for entire population. Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)5 = like #4, but will scan over entire brain cache of populations to get best scored individuals (can be slower due to brain cache scanning if big cache)1000 + feature_brain_level (above positive values) = use resumed_experiment_id and actual feature_brain_level, to use other specific experiment as base for individuals or population, instead of sampling from any old experimentsGUI has 3 options and corresponding settings:1) New Experiment: Uses feature brain level default of 22) New Experiment With Same Settings: Re-uses the same feature brain level as parent experiment3) Restart From Last Checkpoint: Resets feature brain level to 1003 and sets experiment ID to resume from (continued genetic algorithm iterations)4) Retrain Final Pipeline: Like Restart but also time=0 so skips any tuning and heads straight to final model (assumes had at least one tuning iteration in parent experiment)Other use cases:a) Restart on different data: Use same column names and fewer or more rows (applicable to 1 - 5)b) Re-fit only final pipeline: Like (a), but choose time=1 and feature_brain_level=3 - 5c) Restart with more columns: Add columns, so model builds upon old model built from old column names (1 - 5)d) Restart with focus on model tuning: Restart, then select feature_engineering_effort = 3 in expert settingse) can retrain final model but ignore any original features except those in final pipeline (normal retrain but set brain_add_features_for_new_columns=false)Notes:1) In all cases, we first check the resumed experiment id if given, and then the brain cache2) For Restart cases, may want to set min_dai_iterations 
to non-zero to force delayed early stopping, else may not be enough iterations to find better model.3) A \"New experiment with Same Settings\" of a Restart will use feature_brain_level=1003 for default Restart mode (revert to 2, or even 0 if want to start a fresh experiment otherwise)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting feature_brain_level", + "output": "feature brain level config.toml: Model/Feature Brain Level (0..10): Whether to show (or use) results from H2O.ai brain: the local caching and smart re-use of prior experiments,in order to generate more useful features and models for new experiments.See use_feature_brain_new_experiments for how new experiments by default do not use brain cache.It can also be used to control checkpointing for experiments that have been paused or interrupted.DAI will use H2O.ai brain cache if cache file hasa) any matching column names and types for a similar experiment typeb) exactly matches classesc) exactly matches class labelsd) matches basic time series choicese) interpretability of cache is equal or lowerf) main model (booster) is allowed by new experiment.Level of brain to use (for chosen level, where higher levels will also do all lower level operations automatically)-1 = Don't use any brain cache and don't write any cache0 = Don't use any brain cache but still write cache Use case: Want to save model for later use, but want current model to be built without any brain models1 = smart checkpoint from latest best individual model Use case: Want to use latest matching model, but match can be loose, so needs caution2 = smart checkpoint from H2O.ai brain cache of individual best models Use case: DAI scans through H2O.ai brain cache for best models to restart from3 = smart checkpoint like level #1, but for entire population. 
Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)4 = smart checkpoint like level #2, but for entire population. Tune only if brain population insufficient size (will re-score entire population in single iteration, so appears to take longer to complete first iteration)5 = like #4, but will scan over entire brain cache of populations to get best scored individuals (can be slower due to brain cache scanning if big cache)1000 + feature_brain_level (above positive values) = use resumed_experiment_id and actual feature_brain_level, to use other specific experiment as base for individuals or population, instead of sampling from any old experimentsGUI has 3 options and corresponding settings:1) New Experiment: Uses feature brain level default of 22) New Experiment With Same Settings: Re-uses the same feature brain level as parent experiment3) Restart From Last Checkpoint: Resets feature brain level to 1003 and sets experiment ID to resume from (continued genetic algorithm iterations)4) Retrain Final Pipeline: Like Restart but also time=0 so skips any tuning and heads straight to final model (assumes had at least one tuning iteration in parent experiment)Other use cases:a) Restart on different data: Use same column names and fewer or more rows (applicable to 1 - 5)b) Re-fit only final pipeline: Like (a), but choose time=1 and feature_brain_level=3 - 5c) Restart with more columns: Add columns, so model builds upon old model built from old column names (1 - 5)d) Restart with focus on model tuning: Restart, then select feature_engineering_effort = 3 in expert settingse) can retrain final model but ignore any original features except those in final pipeline (normal retrain but set brain_add_features_for_new_columns=false)Notes:1) In all cases, we first check the resumed experiment id if given, and then the brain cache2) For Restart cases, may want to set min_dai_iterations 
to non-zero to force delayed early stopping, else may not be enough iterations to find better model.3) A \"New experiment with Same Settings\" of a Restart will use feature_brain_level=1003 for default Restart mode (revert to 2, or even 0 if want to start a fresh experiment otherwise)" + }, + { + "prompt_type": "plain", + "instruction": ": What does feature_brain_reset_score do? : feature brain reset score config.toml: Whether to smartly keep score to avoid re-munging/re-training/re-scoring steps brain models ('auto'), always force all steps for all brain imports ('on'), or never rescore ('off'). 'auto' only re-scores if a difference in current and prior experiment warrants re-scoring, like column changes, metric changes, etc. 'on' is useful when smart similarity checking is not reliable enough. 'off' is uesful when know want to keep exact same features and model for final model refit, despite changes in seed or other behaviors in features that might change the outcome if re-scored before reaching final model. If set off, then no limits are applied to features during brain ingestion, while can set brain_add_features_for_new_columns to false if want to ignore any new columns in data. In addition, any unscored individuals loaded from parent experiment are not rescored when doing refit or retrain. Can also set refit_same_best_individual True if want exact same best individual (highest scored model+features) to be used regardless of any scoring changes. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain feature_brain_reset_score. : feature brain reset score config.toml: Whether to smartly keep score to avoid re-munging/re-training/re-scoring steps brain models ('auto'), always force all steps for all brain imports ('on'), or never rescore ('off'). 'auto' only re-scores if a difference in current and prior experiment warrants re-scoring, like column changes, metric changes, etc. 'on' is useful when smart similarity checking is not reliable enough. 
'off' is useful when know want to keep exact same features and model for final model refit, despite changes in seed or other behaviors in features that might change the outcome if re-scored before reaching final model. If set off, then no limits are applied to features during brain ingestion, while can set brain_add_features_for_new_columns to false if want to ignore any new columns in data. In addition, any unscored individuals loaded from parent experiment are not rescored when doing refit or retrain. Can also set refit_same_best_individual True if want exact same best individual (highest scored model+features) to be used regardless of any scoring changes. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to re-score models from brain cache: . : Set the feature brain reset score config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_brain_reset_score", + "output": "feature brain reset score config.toml: Whether to smartly keep score to avoid re-munging/re-training/re-scoring steps brain models ('auto'), always force all steps for all brain imports ('on'), or never rescore ('off'). 'auto' only re-scores if a difference in current and prior experiment warrants re-scoring, like column changes, metric changes, etc. 'on' is useful when smart similarity checking is not reliable enough. 'off' is useful when know want to keep exact same features and model for final model refit, despite changes in seed or other behaviors in features that might change the outcome if re-scored before reaching final model. If set off, then no limits are applied to features during brain ingestion, while can set brain_add_features_for_new_columns to false if want to ignore any new columns in data. In addition, any unscored individuals loaded from parent experiment are not rescored when doing refit or retrain. 
Can also set refit_same_best_individual True if want exact same best individual (highest scored model+features) to be used regardless of any scoring changes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_brain_reset_score", + "output": "feature brain reset score config.toml: Whether to re-score models from brain cache: Whether to smartly keep score to avoid re-munging/re-training/re-scoring steps brain models ('auto'), always force all steps for all brain imports ('on'), or never rescore ('off'). 'auto' only re-scores if a difference in current and prior experiment warrants re-scoring, like column changes, metric changes, etc. 'on' is useful when smart similarity checking is not reliable enough. 'off' is useful when know want to keep exact same features and model for final model refit, despite changes in seed or other behaviors in features that might change the outcome if re-scored before reaching final model. If set off, then no limits are applied to features during brain ingestion, while can set brain_add_features_for_new_columns to false if want to ignore any new columns in data. In addition, any unscored individuals loaded from parent experiment are not rescored when doing refit or retrain. 
'auto' only re-scores if a difference in current and prior experiment warrants re-scoring, like column changes, metric changes, etc. 'on' is useful when smart similarity checking is not reliable enough. 'off' is useful when know want to keep exact same features and model for final model refit, despite changes in seed or other behaviors in features that might change the outcome if re-scored before reaching final model. If set off, then no limits are applied to features during brain ingestion, while can set brain_add_features_for_new_columns to false if want to ignore any new columns in data. In addition, any unscored individuals loaded from parent experiment are not rescored when doing refit or retrain. Can also set refit_same_best_individual True if want exact same best individual (highest scored model+features) to be used regardless of any scoring changes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to re-score models from brain cache: ", + "output": "feature brain reset score config.toml: Whether to re-score models from brain cache: Whether to smartly keep score to avoid re-munging/re-training/re-scoring steps brain models ('auto'), always force all steps for all brain imports ('on'), or never rescore ('off'). 'auto' only re-scores if a difference in current and prior experiment warrants re-scoring, like column changes, metric changes, etc. 'on' is useful when smart similarity checking is not reliable enough. 'off' is useful when know want to keep exact same features and model for final model refit, despite changes in seed or other behaviors in features that might change the outcome if re-scored before reaching final model. If set off, then no limits are applied to features during brain ingestion, while can set brain_add_features_for_new_columns to false if want to ignore any new columns in data. 
In addition, any unscored individuals loaded from parent experiment are not rescored when doing refit or retrain. Can also set refit_same_best_individual True if want exact same best individual (highest scored model+features) to be used regardless of any scoring changes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting feature_brain_reset_score", + "output": "feature brain reset score config.toml: Whether to smartly keep score to avoid re-munging/re-training/re-scoring steps brain models ('auto'), always force all steps for all brain imports ('on'), or never rescore ('off'). 'auto' only re-scores if a difference in current and prior experiment warrants re-scoring, like column changes, metric changes, etc. 'on' is useful when smart similarity checking is not reliable enough. 'off' is useful when know want to keep exact same features and model for final model refit, despite changes in seed or other behaviors in features that might change the outcome if re-scored before reaching final model. If set off, then no limits are applied to features during brain ingestion, while can set brain_add_features_for_new_columns to false if want to ignore any new columns in data. In addition, any unscored individuals loaded from parent experiment are not rescored when doing refit or retrain. Can also set refit_same_best_individual True if want exact same best individual (highest scored model+features) to be used regardless of any scoring changes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting feature_brain_reset_score", + "output": "feature brain reset score config.toml: Whether to re-score models from brain cache: Whether to smartly keep score to avoid re-munging/re-training/re-scoring steps brain models ('auto'), always force all steps for all brain imports ('on'), or never rescore ('off'). 
'auto' only re-scores if a difference in current and prior experiment warrants re-scoring, like column changes, metric changes, etc. 'on' is useful when smart similarity checking is not reliable enough. 'off' is useful when know want to keep exact same features and model for final model refit, despite changes in seed or other behaviors in features that might change the outcome if re-scored before reaching final model. If set off, then no limits are applied to features during brain ingestion, while can set brain_add_features_for_new_columns to false if want to ignore any new columns in data. In addition, any unscored individuals loaded from parent experiment are not rescored when doing refit or retrain. Can also set refit_same_best_individual True if want exact same best individual (highest scored model+features) to be used regardless of any scoring changes. " + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_change_layer_count_brain do? : allow change layer count brain config.toml: For feature brain or restart/refit, whether to allow brain ingest to use different feature engineering layer count.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_change_layer_count_brain. 
: allow change layer count brain config.toml: For feature brain or restart/refit, whether to allow brain ingest to use different feature engineering layer count.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_change_layer_count_brain", + "output": "allow change layer count brain config.toml: For feature brain or restart/refit, whether to allow brain ingest to use different feature engineering layer count.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_change_layer_count_brain", + "output": "allow change layer count brain config.toml: For feature brain or restart/refit, whether to allow brain ingest to use different feature engineering layer count.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow change layer count brain", + "output": "allow change layer count brain config.toml: For feature brain or restart/refit, whether to allow brain ingest to use different feature engineering layer count.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "For feature brain or restart/refit, whether to allow brain ingest to use different feature engineering layer count.: ", + "output": "allow change layer count brain config.toml: For feature brain or restart/refit, whether to allow brain ingest to use different feature engineering layer count.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_change_layer_count_brain", + "output": "allow change layer count brain config.toml: For feature brain or restart/refit, whether to allow brain ingest to use different feature engineering layer count.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of 
the expert setting allow_change_layer_count_brain", + "output": "allow change layer count brain config.toml: For feature brain or restart/refit, whether to allow brain ingest to use different feature engineering layer count.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does brain_maximum_diff_score do? : brain maximum diff score config.toml: Relative number of columns that must match between current reference individual and brain individual. 0.0: perfect match 1.0: All columns are different, worst match e.g. 0.1 implies no more than 10% of columns mismatch between reference set of columns and brain individual. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain brain_maximum_diff_score. : brain maximum diff score config.toml: Relative number of columns that must match between current reference individual and brain individual. 0.0: perfect match 1.0: All columns are different, worst match e.g. 0.1 implies no more than 10% of columns mismatch between reference set of columns and brain individual. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "brain_maximum_diff_score", + "output": "brain maximum diff score config.toml: Relative number of columns that must match between current reference individual and brain individual. 0.0: perfect match 1.0: All columns are different, worst match e.g. 0.1 implies no more than 10% of columns mismatch between reference set of columns and brain individual. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "brain_maximum_diff_score", + "output": "brain maximum diff score config.toml: Relative number of columns that must match between current reference individual and brain individual. 0.0: perfect match 1.0: All columns are different, worst match e.g. 
0.1 implies no more than 10% of columns mismatch between reference set of columns and brain individual. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "brain maximum diff score", + "output": "brain maximum diff score config.toml: Relative number of columns that must match between current reference individual and brain individual. 0.0: perfect match 1.0: All columns are different, worst match e.g. 0.1 implies no more than 10% of columns mismatch between reference set of columns and brain individual. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "brain maximum diff score config.toml: Relative number of columns that must match between current reference individual and brain individual. 0.0: perfect match 1.0: All columns are different, worst match e.g. 0.1 implies no more than 10% of columns mismatch between reference set of columns and brain individual. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting brain_maximum_diff_score", + "output": "brain maximum diff score config.toml: Relative number of columns that must match between current reference individual and brain individual. 0.0: perfect match 1.0: All columns are different, worst match e.g. 0.1 implies no more than 10% of columns mismatch between reference set of columns and brain individual. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting brain_maximum_diff_score", + "output": "brain maximum diff score config.toml: Relative number of columns that must match between current reference individual and brain individual. 0.0: perfect match 1.0: All columns are different, worst match e.g. 0.1 implies no more than 10% of columns mismatch between reference set of columns and brain individual. 
" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_num_brain_indivs do? : max num brain indivs config.toml: Maximum number of brain individuals pulled from H2O.ai brain cache for feature_brain_level=1, 2" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_num_brain_indivs. : max num brain indivs config.toml: Maximum number of brain individuals pulled from H2O.ai brain cache for feature_brain_level=1, 2" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_num_brain_indivs", + "output": "max num brain indivs config.toml: Maximum number of brain individuals pulled from H2O.ai brain cache for feature_brain_level=1, 2" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_num_brain_indivs", + "output": "max num brain indivs config.toml: Maximum number of brain individuals pulled from H2O.ai brain cache for feature_brain_level=1, 2" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max num brain indivs", + "output": "max num brain indivs config.toml: Maximum number of brain individuals pulled from H2O.ai brain cache for feature_brain_level=1, 2" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max num brain indivs config.toml: Maximum number of brain individuals pulled from H2O.ai brain cache for feature_brain_level=1, 2" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_num_brain_indivs", + "output": "max num brain indivs config.toml: Maximum number of brain individuals pulled from H2O.ai brain cache for feature_brain_level=1, 2" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting 
max_num_brain_indivs", + "output": "max num brain indivs config.toml: Maximum number of brain individuals pulled from H2O.ai brain cache for feature_brain_level=1, 2" + }, + { + "prompt_type": "plain", + "instruction": ": What does feature_brain_save_every_iteration do? : feature brain save every iteration config.toml: Save feature brain iterations every iter_num % feature_brain_iterations_save_every_iteration == 0, to be able to restart/refit with which_iteration_brain >= 00 means disable " + }, + { + "prompt_type": "plain", + "instruction": ": Explain feature_brain_save_every_iteration. : feature brain save every iteration config.toml: Save feature brain iterations every iter_num % feature_brain_iterations_save_every_iteration == 0, to be able to restart/refit with which_iteration_brain >= 00 means disable " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Feature Brain Save every which iteration (0 = disable): . : Set the feature brain save every iteration config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_brain_save_every_iteration", + "output": "feature brain save every iteration config.toml: Save feature brain iterations every iter_num % feature_brain_iterations_save_every_iteration == 0, to be able to restart/refit with which_iteration_brain >= 00 means disable " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_brain_save_every_iteration", + "output": "feature brain save every iteration config.toml: Feature Brain Save every which iteration (0 = disable): Save feature brain iterations every iter_num % feature_brain_iterations_save_every_iteration == 0, to be able to restart/refit with which_iteration_brain >= 00 means disable " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": 
"feature brain save every iteration", + "output": "feature brain save every iteration config.toml: Feature Brain Save every which iteration (0 = disable): Save feature brain iterations every iter_num % feature_brain_iterations_save_every_iteration == 0, to be able to restart/refit with which_iteration_brain >= 00 means disable " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Feature Brain Save every which iteration (0 = disable): ", + "output": "feature brain save every iteration config.toml: Feature Brain Save every which iteration (0 = disable): Save feature brain iterations every iter_num % feature_brain_iterations_save_every_iteration == 0, to be able to restart/refit with which_iteration_brain >= 00 means disable " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting feature_brain_save_every_iteration", + "output": "feature brain save every iteration config.toml: Save feature brain iterations every iter_num % feature_brain_iterations_save_every_iteration == 0, to be able to restart/refit with which_iteration_brain >= 00 means disable " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting feature_brain_save_every_iteration", + "output": "feature brain save every iteration config.toml: Feature Brain Save every which iteration (0 = disable): Save feature brain iterations every iter_num % feature_brain_iterations_save_every_iteration == 0, to be able to restart/refit with which_iteration_brain >= 00 means disable " + }, + { + "prompt_type": "plain", + "instruction": ": What does which_iteration_brain do? 
: which iteration brain config.toml: When doing restart or re-fit type feature_brain_level with resumed_experiment_id, choose which iteration to start from, instead of only last best-1 means just use last bestUsage:1) Run one experiment with feature_brain_iterations_save_every_iteration=1 or some other number2) Identify which iteration brain dump one wants to restart/refit from3) Restart/Refit from original experiment, setting which_iteration_brain to that number in expert settingsNote: If restart from a tuning iteration, this will pull in entire scored tuning population and use that for feature evolution " + }, + { + "prompt_type": "plain", + "instruction": ": Explain which_iteration_brain. : which iteration brain config.toml: When doing restart or re-fit type feature_brain_level with resumed_experiment_id, choose which iteration to start from, instead of only last best-1 means just use last bestUsage:1) Run one experiment with feature_brain_iterations_save_every_iteration=1 or some other number2) Identify which iteration brain dump one wants to restart/refit from3) Restart/Refit from original experiment, setting which_iteration_brain to that number in expert settingsNote: If restart from a tuning iteration, this will pull in entire scored tuning population and use that for feature evolution " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Feature Brain Restart from which iteration (-1 = auto): . 
: Set the which iteration brain config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "which_iteration_brain", + "output": "which iteration brain config.toml: When doing restart or re-fit type feature_brain_level with resumed_experiment_id, choose which iteration to start from, instead of only last best-1 means just use last bestUsage:1) Run one experiment with feature_brain_iterations_save_every_iteration=1 or some other number2) Identify which iteration brain dump one wants to restart/refit from3) Restart/Refit from original experiment, setting which_iteration_brain to that number in expert settingsNote: If restart from a tuning iteration, this will pull in entire scored tuning population and use that for feature evolution " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "which_iteration_brain", + "output": "which iteration brain config.toml: Feature Brain Restart from which iteration (-1 = auto): When doing restart or re-fit type feature_brain_level with resumed_experiment_id, choose which iteration to start from, instead of only last best-1 means just use last bestUsage:1) Run one experiment with feature_brain_iterations_save_every_iteration=1 or some other number2) Identify which iteration brain dump one wants to restart/refit from3) Restart/Refit from original experiment, setting which_iteration_brain to that number in expert settingsNote: If restart from a tuning iteration, this will pull in entire scored tuning population and use that for feature evolution " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "which iteration brain", + "output": "which iteration brain config.toml: Feature Brain Restart from which iteration (-1 = auto): When doing restart or re-fit type feature_brain_level with resumed_experiment_id, choose 
which iteration to start from, instead of only last best-1 means just use last bestUsage:1) Run one experiment with feature_brain_iterations_save_every_iteration=1 or some other number2) Identify which iteration brain dump one wants to restart/refit from3) Restart/Refit from original experiment, setting which_iteration_brain to that number in expert settingsNote: If restart from a tuning iteration, this will pull in entire scored tuning population and use that for feature evolution " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Feature Brain Restart from which iteration (-1 = auto): ", + "output": "which iteration brain config.toml: Feature Brain Restart from which iteration (-1 = auto): When doing restart or re-fit type feature_brain_level with resumed_experiment_id, choose which iteration to start from, instead of only last best-1 means just use last bestUsage:1) Run one experiment with feature_brain_iterations_save_every_iteration=1 or some other number2) Identify which iteration brain dump one wants to restart/refit from3) Restart/Refit from original experiment, setting which_iteration_brain to that number in expert settingsNote: If restart from a tuning iteration, this will pull in entire scored tuning population and use that for feature evolution " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting which_iteration_brain", + "output": "which iteration brain config.toml: When doing restart or re-fit type feature_brain_level with resumed_experiment_id, choose which iteration to start from, instead of only last best-1 means just use last bestUsage:1) Run one experiment with feature_brain_iterations_save_every_iteration=1 or some other number2) Identify which iteration brain dump one wants to restart/refit from3) Restart/Refit from original experiment, setting which_iteration_brain to that number in expert settingsNote: If 
restart from a tuning iteration, this will pull in entire scored tuning population and use that for feature evolution " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting which_iteration_brain", + "output": "which iteration brain config.toml: Feature Brain Restart from which iteration (-1 = auto): When doing restart or re-fit type feature_brain_level with resumed_experiment_id, choose which iteration to start from, instead of only last best-1 means just use last bestUsage:1) Run one experiment with feature_brain_iterations_save_every_iteration=1 or some other number2) Identify which iteration brain dump one wants to restart/refit from3) Restart/Refit from original experiment, setting which_iteration_brain to that number in expert settingsNote: If restart from a tuning iteration, this will pull in entire scored tuning population and use that for feature evolution " + }, + { + "prompt_type": "plain", + "instruction": ": What does refit_same_best_individual do? : refit same best individual config.toml: When doing re-fit from feature brain, if change columns or features, population of individuals used to refit from may change order of which was best,leading to better result chosen (False case). But sometimes want to see exact same model/features with only one feature added,and then would need to set this to True case.E.g. if refit with just 1 extra column and have interpretability=1, then final model will be same features,with one more engineered feature applied to that new original feature. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain refit_same_best_individual. : refit same best individual config.toml: When doing re-fit from feature brain, if change columns or features, population of individuals used to refit from may change order of which was best,leading to better result chosen (False case). 
But sometimes want to see exact same model/features with only one feature added,and then would need to set this to True case.E.g. if refit with just 1 extra column and have interpretability=1, then final model will be same features,with one more engineered feature applied to that new original feature. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Feature Brain refit uses same best individual: . : Set the refit same best individual config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "refit_same_best_individual", + "output": "refit same best individual config.toml: When doing re-fit from feature brain, if change columns or features, population of individuals used to refit from may change order of which was best,leading to better result chosen (False case). But sometimes want to see exact same model/features with only one feature added,and then would need to set this to True case.E.g. if refit with just 1 extra column and have interpretability=1, then final model will be same features,with one more engineered feature applied to that new original feature. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "refit_same_best_individual", + "output": "refit same best individual config.toml: Feature Brain refit uses same best individual: When doing re-fit from feature brain, if change columns or features, population of individuals used to refit from may change order of which was best,leading to better result chosen (False case). But sometimes want to see exact same model/features with only one feature added,and then would need to set this to True case.E.g. if refit with just 1 extra column and have interpretability=1, then final model will be same features,with one more engineered feature applied to that new original feature. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "refit same best individual", + "output": "refit same best individual config.toml: Feature Brain refit uses same best individual: When doing re-fit from feature brain, if change columns or features, population of individuals used to refit from may change order of which was best,leading to better result chosen (False case). But sometimes want to see exact same model/features with only one feature added,and then would need to set this to True case.E.g. if refit with just 1 extra column and have interpretability=1, then final model will be same features,with one more engineered feature applied to that new original feature. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Feature Brain refit uses same best individual: ", + "output": "refit same best individual config.toml: Feature Brain refit uses same best individual: When doing re-fit from feature brain, if change columns or features, population of individuals used to refit from may change order of which was best,leading to better result chosen (False case). But sometimes want to see exact same model/features with only one feature added,and then would need to set this to True case.E.g. if refit with just 1 extra column and have interpretability=1, then final model will be same features,with one more engineered feature applied to that new original feature. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting refit_same_best_individual", + "output": "refit same best individual config.toml: When doing re-fit from feature brain, if change columns or features, population of individuals used to refit from may change order of which was best,leading to better result chosen (False case). 
But sometimes want to see exact same model/features with only one feature added,and then would need to set this to True case.E.g. if refit with just 1 extra column and have interpretability=1, then final model will be same features,with one more engineered feature applied to that new original feature. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting refit_same_best_individual", + "output": "refit same best individual config.toml: Feature Brain refit uses same best individual: When doing re-fit from feature brain, if change columns or features, population of individuals used to refit from may change order of which was best,leading to better result chosen (False case). But sometimes want to see exact same model/features with only one feature added,and then would need to set this to True case.E.g. if refit with just 1 extra column and have interpretability=1, then final model will be same features,with one more engineered feature applied to that new original feature. " + }, + { + "prompt_type": "plain", + "instruction": ": What does restart_refit_redo_origfs_shift_leak do? : restart refit redo origfs shift leak config.toml: When doing restart or re-fit of experiment from feature brain,sometimes user might change data significantly and then warrantredoing reduction of original features by feature selection, shift detection, and leakage detection.However, in other cases, if data and all options are nearly (or exactly) identical, then thesesteps might change the features slightly (e.g. due to random seed if not setting reproducible mode),leading to changes in features and model that is refitted. 
By default, restart and refit avoidthese steps assuming data and experiment setup have no changed significantly.If check_distribution_shift is forced to on (instead of auto), then this option is ignored.In order to ensure exact same final pipeline is fitted, one should also set:1) brain_add_features_for_new_columns false2) refit_same_best_individual true3) feature_brain_reset_score 'off'4) force_model_restart_to_defaults falseThe score will still be reset if the experiment metric chosen changes,but changes to the scored model and features will be more frozen in place. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain restart_refit_redo_origfs_shift_leak. : restart refit redo origfs shift leak config.toml: When doing restart or re-fit of experiment from feature brain,sometimes user might change data significantly and then warrantredoing reduction of original features by feature selection, shift detection, and leakage detection.However, in other cases, if data and all options are nearly (or exactly) identical, then thesesteps might change the features slightly (e.g. due to random seed if not setting reproducible mode),leading to changes in features and model that is refitted. By default, restart and refit avoidthese steps assuming data and experiment setup have no changed significantly.If check_distribution_shift is forced to on (instead of auto), then this option is ignored.In order to ensure exact same final pipeline is fitted, one should also set:1) brain_add_features_for_new_columns false2) refit_same_best_individual true3) feature_brain_reset_score 'off'4) force_model_restart_to_defaults falseThe score will still be reset if the experiment metric chosen changes,but changes to the scored model and features will be more frozen in place. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: For restart-refit, select which steps to do: . 
: Set the restart refit redo origfs shift leak config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "restart_refit_redo_origfs_shift_leak", + "output": "restart refit redo origfs shift leak config.toml: When doing restart or re-fit of experiment from feature brain,sometimes user might change data significantly and then warrantredoing reduction of original features by feature selection, shift detection, and leakage detection.However, in other cases, if data and all options are nearly (or exactly) identical, then thesesteps might change the features slightly (e.g. due to random seed if not setting reproducible mode),leading to changes in features and model that is refitted. By default, restart and refit avoidthese steps assuming data and experiment setup have no changed significantly.If check_distribution_shift is forced to on (instead of auto), then this option is ignored.In order to ensure exact same final pipeline is fitted, one should also set:1) brain_add_features_for_new_columns false2) refit_same_best_individual true3) feature_brain_reset_score 'off'4) force_model_restart_to_defaults falseThe score will still be reset if the experiment metric chosen changes,but changes to the scored model and features will be more frozen in place. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "restart_refit_redo_origfs_shift_leak", + "output": "restart refit redo origfs shift leak config.toml: For restart-refit, select which steps to do: When doing restart or re-fit of experiment from feature brain,sometimes user might change data significantly and then warrantredoing reduction of original features by feature selection, shift detection, and leakage detection.However, in other cases, if data and all options are nearly (or exactly) identical, then thesesteps might change the features slightly (e.g. 
due to random seed if not setting reproducible mode),leading to changes in features and model that is refitted. By default, restart and refit avoidthese steps assuming data and experiment setup have no changed significantly.If check_distribution_shift is forced to on (instead of auto), then this option is ignored.In order to ensure exact same final pipeline is fitted, one should also set:1) brain_add_features_for_new_columns false2) refit_same_best_individual true3) feature_brain_reset_score 'off'4) force_model_restart_to_defaults falseThe score will still be reset if the experiment metric chosen changes,but changes to the scored model and features will be more frozen in place. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "restart refit redo origfs shift leak", + "output": "restart refit redo origfs shift leak config.toml: For restart-refit, select which steps to do: When doing restart or re-fit of experiment from feature brain,sometimes user might change data significantly and then warrantredoing reduction of original features by feature selection, shift detection, and leakage detection.However, in other cases, if data and all options are nearly (or exactly) identical, then thesesteps might change the features slightly (e.g. due to random seed if not setting reproducible mode),leading to changes in features and model that is refitted. 
By default, restart and refit avoidthese steps assuming data and experiment setup have no changed significantly.If check_distribution_shift is forced to on (instead of auto), then this option is ignored.In order to ensure exact same final pipeline is fitted, one should also set:1) brain_add_features_for_new_columns false2) refit_same_best_individual true3) feature_brain_reset_score 'off'4) force_model_restart_to_defaults falseThe score will still be reset if the experiment metric chosen changes,but changes to the scored model and features will be more frozen in place. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "For restart-refit, select which steps to do: ", + "output": "restart refit redo origfs shift leak config.toml: For restart-refit, select which steps to do: When doing restart or re-fit of experiment from feature brain,sometimes user might change data significantly and then warrantredoing reduction of original features by feature selection, shift detection, and leakage detection.However, in other cases, if data and all options are nearly (or exactly) identical, then thesesteps might change the features slightly (e.g. due to random seed if not setting reproducible mode),leading to changes in features and model that is refitted. By default, restart and refit avoidthese steps assuming data and experiment setup have no changed significantly.If check_distribution_shift is forced to on (instead of auto), then this option is ignored.In order to ensure exact same final pipeline is fitted, one should also set:1) brain_add_features_for_new_columns false2) refit_same_best_individual true3) feature_brain_reset_score 'off'4) force_model_restart_to_defaults falseThe score will still be reset if the experiment metric chosen changes,but changes to the scored model and features will be more frozen in place. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting restart_refit_redo_origfs_shift_leak", + "output": "restart refit redo origfs shift leak config.toml: When doing restart or re-fit of experiment from feature brain,sometimes user might change data significantly and then warrantredoing reduction of original features by feature selection, shift detection, and leakage detection.However, in other cases, if data and all options are nearly (or exactly) identical, then thesesteps might change the features slightly (e.g. due to random seed if not setting reproducible mode),leading to changes in features and model that is refitted. By default, restart and refit avoidthese steps assuming data and experiment setup have no changed significantly.If check_distribution_shift is forced to on (instead of auto), then this option is ignored.In order to ensure exact same final pipeline is fitted, one should also set:1) brain_add_features_for_new_columns false2) refit_same_best_individual true3) feature_brain_reset_score 'off'4) force_model_restart_to_defaults falseThe score will still be reset if the experiment metric chosen changes,but changes to the scored model and features will be more frozen in place. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting restart_refit_redo_origfs_shift_leak", + "output": "restart refit redo origfs shift leak config.toml: For restart-refit, select which steps to do: When doing restart or re-fit of experiment from feature brain,sometimes user might change data significantly and then warrantredoing reduction of original features by feature selection, shift detection, and leakage detection.However, in other cases, if data and all options are nearly (or exactly) identical, then thesesteps might change the features slightly (e.g. 
due to random seed if not setting reproducible mode),leading to changes in features and model that is refitted. By default, restart and refit avoidthese steps assuming data and experiment setup have no changed significantly.If check_distribution_shift is forced to on (instead of auto), then this option is ignored.In order to ensure exact same final pipeline is fitted, one should also set:1) brain_add_features_for_new_columns false2) refit_same_best_individual true3) feature_brain_reset_score 'off'4) force_model_restart_to_defaults falseThe score will still be reset if the experiment metric chosen changes,but changes to the scored model and features will be more frozen in place. " + }, + { + "prompt_type": "plain", + "instruction": ": What does brain_rel_dir do? : brain rel dir config.toml: Directory, relative to data_directory, to store H2O.ai brain meta model files" + }, + { + "prompt_type": "plain", + "instruction": ": Explain brain_rel_dir. : brain rel dir config.toml: Directory, relative to data_directory, to store H2O.ai brain meta model files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "brain_rel_dir", + "output": "brain rel dir config.toml: Directory, relative to data_directory, to store H2O.ai brain meta model files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "brain_rel_dir", + "output": "brain rel dir config.toml: Directory, relative to data_directory, to store H2O.ai brain meta model files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "brain rel dir", + "output": "brain rel dir config.toml: Directory, relative to data_directory, to store H2O.ai brain meta model files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "brain 
rel dir config.toml: Directory, relative to data_directory, to store H2O.ai brain meta model files" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting brain_rel_dir", + "output": "brain rel dir config.toml: Directory, relative to data_directory, to store H2O.ai brain meta model files" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting brain_rel_dir", + "output": "brain rel dir config.toml: Directory, relative to data_directory, to store H2O.ai brain meta model files" + }, + { + "prompt_type": "plain", + "instruction": ": What does brain_max_size_GB do? : brain max size GB config.toml: Maximum size in bytes the brain will store We reserve this memory to save data in order to ensure we can retrieve an experiment if for any reason it gets interrupted. -1: unlimited >=0 number of GB to limit brain to" + }, + { + "prompt_type": "plain", + "instruction": ": Explain brain_max_size_GB. : brain max size GB config.toml: Maximum size in bytes the brain will store We reserve this memory to save data in order to ensure we can retrieve an experiment if for any reason it gets interrupted. -1: unlimited >=0 number of GB to limit brain to" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "brain_max_size_GB", + "output": "brain max size GB config.toml: Maximum size in bytes the brain will store We reserve this memory to save data in order to ensure we can retrieve an experiment if for any reason it gets interrupted. 
-1: unlimited >=0 number of GB to limit brain to" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "brain_max_size_GB", + "output": "brain max size GB config.toml: Maximum size in bytes the brain will store We reserve this memory to save data in order to ensure we can retrieve an experiment if for any reason it gets interrupted. -1: unlimited >=0 number of GB to limit brain to" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "brain max size GB", + "output": "brain max size GB config.toml: Maximum size in bytes the brain will store We reserve this memory to save data in order to ensure we can retrieve an experiment if for any reason it gets interrupted. -1: unlimited >=0 number of GB to limit brain to" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "brain max size GB config.toml: Maximum size in bytes the brain will store We reserve this memory to save data in order to ensure we can retrieve an experiment if for any reason it gets interrupted. -1: unlimited >=0 number of GB to limit brain to" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting brain_max_size_GB", + "output": "brain max size GB config.toml: Maximum size in bytes the brain will store We reserve this memory to save data in order to ensure we can retrieve an experiment if for any reason it gets interrupted. -1: unlimited >=0 number of GB to limit brain to" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting brain_max_size_GB", + "output": "brain max size GB config.toml: Maximum size in bytes the brain will store We reserve this memory to save data in order to ensure we can retrieve an experiment if for any reason it gets interrupted. 
-1: unlimited >=0 number of GB to limit brain to" + }, + { + "prompt_type": "plain", + "instruction": ": What does brain_add_features_for_new_columns do? : brain add features for new columns config.toml: Whether to take any new columns and add additional features to pipeline, even if doing retrain final model.In some cases, one might have a new dataset but only want to keep same pipeline regardless of new columns,in which case one sets this to False. For example, new data might lead to new dropped features,due to shift or leak detection. To avoid change of feature set, one can disable all dropping of columns,but set this to False to avoid adding any columns as new features,so pipeline is perfectly preserved when changing data. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain brain_add_features_for_new_columns. : brain add features for new columns config.toml: Whether to take any new columns and add additional features to pipeline, even if doing retrain final model.In some cases, one might have a new dataset but only want to keep same pipeline regardless of new columns,in which case one sets this to False. For example, new data might lead to new dropped features,due to shift or leak detection. To avoid change of feature set, one can disable all dropping of columns,but set this to False to avoid adding any columns as new features,so pipeline is perfectly preserved when changing data. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Feature Brain adds features with new columns even during retraining final model: . 
: Set the brain add features for new columns config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "brain_add_features_for_new_columns", + "output": "brain add features for new columns config.toml: Whether to take any new columns and add additional features to pipeline, even if doing retrain final model.In some cases, one might have a new dataset but only want to keep same pipeline regardless of new columns,in which case one sets this to False. For example, new data might lead to new dropped features,due to shift or leak detection. To avoid change of feature set, one can disable all dropping of columns,but set this to False to avoid adding any columns as new features,so pipeline is perfectly preserved when changing data. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "brain_add_features_for_new_columns", + "output": "brain add features for new columns config.toml: Feature Brain adds features with new columns even during retraining final model: Whether to take any new columns and add additional features to pipeline, even if doing retrain final model.In some cases, one might have a new dataset but only want to keep same pipeline regardless of new columns,in which case one sets this to False. For example, new data might lead to new dropped features,due to shift or leak detection. To avoid change of feature set, one can disable all dropping of columns,but set this to False to avoid adding any columns as new features,so pipeline is perfectly preserved when changing data. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "brain add features for new columns", + "output": "brain add features for new columns config.toml: Feature Brain adds features with new columns even during retraining final model: Whether to take any new columns and add additional features to pipeline, even if doing retrain final model.In some cases, one might have a new dataset but only want to keep same pipeline regardless of new columns,in which case one sets this to False. For example, new data might lead to new dropped features,due to shift or leak detection. To avoid change of feature set, one can disable all dropping of columns,but set this to False to avoid adding any columns as new features,so pipeline is perfectly preserved when changing data. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Feature Brain adds features with new columns even during retraining final model: ", + "output": "brain add features for new columns config.toml: Feature Brain adds features with new columns even during retraining final model: Whether to take any new columns and add additional features to pipeline, even if doing retrain final model.In some cases, one might have a new dataset but only want to keep same pipeline regardless of new columns,in which case one sets this to False. For example, new data might lead to new dropped features,due to shift or leak detection. To avoid change of feature set, one can disable all dropping of columns,but set this to False to avoid adding any columns as new features,so pipeline is perfectly preserved when changing data. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting brain_add_features_for_new_columns", + "output": "brain add features for new columns config.toml: Whether to take any new columns and add additional features to pipeline, even if doing retrain final model.In some cases, one might have a new dataset but only want to keep same pipeline regardless of new columns,in which case one sets this to False. For example, new data might lead to new dropped features,due to shift or leak detection. To avoid change of feature set, one can disable all dropping of columns,but set this to False to avoid adding any columns as new features,so pipeline is perfectly preserved when changing data. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting brain_add_features_for_new_columns", + "output": "brain add features for new columns config.toml: Feature Brain adds features with new columns even during retraining final model: Whether to take any new columns and add additional features to pipeline, even if doing retrain final model.In some cases, one might have a new dataset but only want to keep same pipeline regardless of new columns,in which case one sets this to False. For example, new data might lead to new dropped features,due to shift or leak detection. To avoid change of feature set, one can disable all dropping of columns,but set this to False to avoid adding any columns as new features,so pipeline is perfectly preserved when changing data. " + }, + { + "prompt_type": "plain", + "instruction": ": What does force_model_restart_to_defaults do? : force model restart to defaults config.toml: If restart/refit and no longer have the original model class available, be conservativeand go back to defaults for that model class. If False, then try to keep original hyperparameters,which can fail to work in general. 
" + }, + { + "prompt_type": "plain", + "instruction": ": Explain force_model_restart_to_defaults. : force model restart to defaults config.toml: If restart/refit and no longer have the original model class available, be conservativeand go back to defaults for that model class. If False, then try to keep original hyperparameters,which can fail to work in general. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Restart-refit use default model settings if model switches: . : Set the force model restart to defaults config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "force_model_restart_to_defaults", + "output": "force model restart to defaults config.toml: If restart/refit and no longer have the original model class available, be conservativeand go back to defaults for that model class. If False, then try to keep original hyperparameters,which can fail to work in general. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "force_model_restart_to_defaults", + "output": "force model restart to defaults config.toml: Restart-refit use default model settings if model switches: If restart/refit and no longer have the original model class available, be conservativeand go back to defaults for that model class. If False, then try to keep original hyperparameters,which can fail to work in general. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "force model restart to defaults", + "output": "force model restart to defaults config.toml: Restart-refit use default model settings if model switches: If restart/refit and no longer have the original model class available, be conservativeand go back to defaults for that model class. If False, then try to keep original hyperparameters,which can fail to work in general. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Restart-refit use default model settings if model switches: ", + "output": "force model restart to defaults config.toml: Restart-refit use default model settings if model switches: If restart/refit and no longer have the original model class available, be conservativeand go back to defaults for that model class. If False, then try to keep original hyperparameters,which can fail to work in general. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting force_model_restart_to_defaults", + "output": "force model restart to defaults config.toml: If restart/refit and no longer have the original model class available, be conservativeand go back to defaults for that model class. If False, then try to keep original hyperparameters,which can fail to work in general. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting force_model_restart_to_defaults", + "output": "force model restart to defaults config.toml: Restart-refit use default model settings if model switches: If restart/refit and no longer have the original model class available, be conservativeand go back to defaults for that model class. If False, then try to keep original hyperparameters,which can fail to work in general. " + }, + { + "prompt_type": "plain", + "instruction": ": What does early_stopping do? : early stopping config.toml: Whether to enable early stopping Early stopping refers to stopping the feature evolution/engineering process when there is no performance uplift after a certain number of iterations. After early stopping has been triggered, Driverless AI will initiate the ensemble process if selected." + }, + { + "prompt_type": "plain", + "instruction": ": Explain early_stopping. 
: early stopping config.toml: Whether to enable early stopping Early stopping refers to stopping the feature evolution/engineering process when there is no performance uplift after a certain number of iterations. After early stopping has been triggered, Driverless AI will initiate the ensemble process if selected." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "early_stopping", + "output": "early stopping config.toml: Whether to enable early stopping Early stopping refers to stopping the feature evolution/engineering process when there is no performance uplift after a certain number of iterations. After early stopping has been triggered, Driverless AI will initiate the ensemble process if selected." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "early_stopping", + "output": "early stopping config.toml: Whether to enable early stopping Early stopping refers to stopping the feature evolution/engineering process when there is no performance uplift after a certain number of iterations. After early stopping has been triggered, Driverless AI will initiate the ensemble process if selected." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "early stopping", + "output": "early stopping config.toml: Whether to enable early stopping Early stopping refers to stopping the feature evolution/engineering process when there is no performance uplift after a certain number of iterations. After early stopping has been triggered, Driverless AI will initiate the ensemble process if selected." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "early stopping config.toml: Whether to enable early stopping Early stopping refers to stopping the feature evolution/engineering process when there is no performance uplift after a certain number of iterations. After early stopping has been triggered, Driverless AI will initiate the ensemble process if selected." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting early_stopping", + "output": "early stopping config.toml: Whether to enable early stopping Early stopping refers to stopping the feature evolution/engineering process when there is no performance uplift after a certain number of iterations. After early stopping has been triggered, Driverless AI will initiate the ensemble process if selected." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting early_stopping", + "output": "early stopping config.toml: Whether to enable early stopping Early stopping refers to stopping the feature evolution/engineering process when there is no performance uplift after a certain number of iterations. After early stopping has been triggered, Driverless AI will initiate the ensemble process if selected." + }, + { + "prompt_type": "plain", + "instruction": ": What does early_stopping_per_individual do? : early stopping per individual config.toml: Whether to enable early stopping per individual Each individual in the generic algorithm will stop early if no improvement, and it will no longer be mutated. Instead, the best individual will be additionally mutated." + }, + { + "prompt_type": "plain", + "instruction": ": Explain early_stopping_per_individual. 
: early stopping per individual config.toml: Whether to enable early stopping per individual Each individual in the generic algorithm will stop early if no improvement, and it will no longer be mutated. Instead, the best individual will be additionally mutated." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "early_stopping_per_individual", + "output": "early stopping per individual config.toml: Whether to enable early stopping per individual Each individual in the generic algorithm will stop early if no improvement, and it will no longer be mutated. Instead, the best individual will be additionally mutated." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "early_stopping_per_individual", + "output": "early stopping per individual config.toml: Whether to enable early stopping per individual Each individual in the generic algorithm will stop early if no improvement, and it will no longer be mutated. Instead, the best individual will be additionally mutated." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "early stopping per individual", + "output": "early stopping per individual config.toml: Whether to enable early stopping per individual Each individual in the generic algorithm will stop early if no improvement, and it will no longer be mutated. Instead, the best individual will be additionally mutated." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "early stopping per individual config.toml: Whether to enable early stopping per individual Each individual in the generic algorithm will stop early if no improvement, and it will no longer be mutated. Instead, the best individual will be additionally mutated." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting early_stopping_per_individual", + "output": "early stopping per individual config.toml: Whether to enable early stopping per individual Each individual in the generic algorithm will stop early if no improvement, and it will no longer be mutated. Instead, the best individual will be additionally mutated." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting early_stopping_per_individual", + "output": "early stopping per individual config.toml: Whether to enable early stopping per individual Each individual in the generic algorithm will stop early if no improvement, and it will no longer be mutated. Instead, the best individual will be additionally mutated." + }, + { + "prompt_type": "plain", + "instruction": ": What does min_dai_iterations do? : min dai iterations config.toml: Minimum number of Driverless AI iterations to stop the feature evolution/engineeringprocess even if score is not improving. Driverless AI needs to run for at least that manyiterations before deciding to stop. It can be seen a safeguard against suboptimal (early)convergence. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_dai_iterations. : min dai iterations config.toml: Minimum number of Driverless AI iterations to stop the feature evolution/engineeringprocess even if score is not improving. Driverless AI needs to run for at least that manyiterations before deciding to stop. It can be seen a safeguard against suboptimal (early)convergence. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Min. DAI iterations: . 
: Set the min dai iterations config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_dai_iterations", + "output": "min dai iterations config.toml: Minimum number of Driverless AI iterations to stop the feature evolution/engineeringprocess even if score is not improving. Driverless AI needs to run for at least that manyiterations before deciding to stop. It can be seen a safeguard against suboptimal (early)convergence. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_dai_iterations", + "output": "min dai iterations config.toml: Min. DAI iterations: Minimum number of Driverless AI iterations to stop the feature evolution/engineeringprocess even if score is not improving. Driverless AI needs to run for at least that manyiterations before deciding to stop. It can be seen a safeguard against suboptimal (early)convergence. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min dai iterations", + "output": "min dai iterations config.toml: Min. DAI iterations: Minimum number of Driverless AI iterations to stop the feature evolution/engineeringprocess even if score is not improving. Driverless AI needs to run for at least that manyiterations before deciding to stop. It can be seen a safeguard against suboptimal (early)convergence. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Min. DAI iterations: ", + "output": "min dai iterations config.toml: Min. DAI iterations: Minimum number of Driverless AI iterations to stop the feature evolution/engineeringprocess even if score is not improving. Driverless AI needs to run for at least that manyiterations before deciding to stop. It can be seen a safeguard against suboptimal (early)convergence. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_dai_iterations", + "output": "min dai iterations config.toml: Minimum number of Driverless AI iterations to stop the feature evolution/engineeringprocess even if score is not improving. Driverless AI needs to run for at least that manyiterations before deciding to stop. It can be seen a safeguard against suboptimal (early)convergence. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_dai_iterations", + "output": "min dai iterations config.toml: Min. DAI iterations: Minimum number of Driverless AI iterations to stop the feature evolution/engineeringprocess even if score is not improving. Driverless AI needs to run for at least that manyiterations before deciding to stop. It can be seen a safeguard against suboptimal (early)convergence. " + }, + { + "prompt_type": "plain", + "instruction": ": What does nfeatures_max do? : nfeatures max config.toml: Maximum features per model (and each model within the final model if ensemble) kept.Keeps top variable importance features, prunes rest away, after each scoring.Final ensemble will exclude any pruned-away features and only train on kept features,but may contain a few new features due to fitting on different data view (e.g. new clusters)Final scoring pipeline will exclude any pruned-away features,but may contain a few new features due to fitting on different data view (e.g. new clusters)-1 means no restrictions except internally-determined memory and interpretability restrictions.Notes:* If interpretability > remove_scored_0gain_genes_in_postprocessing_above_interpretability, thenevery GA iteration post-processes features down to this value just after scoring them. 
Otherwise,only mutations of scored individuals will be pruned (until the final model where limits are strictly applied).* If ngenes_max is not also limited, then some individuals will have more genes and features untilpruned by mutation or by preparation for final model.* E.g. to generally limit every iteration to exactly 1 features, one must set nfeatures_max=ngenes_max=1and remove_scored_0gain_genes_in_postprocessing_above_interpretability=0, but the genetic algorithmwill have a harder time finding good features. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain nfeatures_max. : nfeatures max config.toml: Maximum features per model (and each model within the final model if ensemble) kept.Keeps top variable importance features, prunes rest away, after each scoring.Final ensemble will exclude any pruned-away features and only train on kept features,but may contain a few new features due to fitting on different data view (e.g. new clusters)Final scoring pipeline will exclude any pruned-away features,but may contain a few new features due to fitting on different data view (e.g. new clusters)-1 means no restrictions except internally-determined memory and interpretability restrictions.Notes:* If interpretability > remove_scored_0gain_genes_in_postprocessing_above_interpretability, thenevery GA iteration post-processes features down to this value just after scoring them. Otherwise,only mutations of scored individuals will be pruned (until the final model where limits are strictly applied).* If ngenes_max is not also limited, then some individuals will have more genes and features untilpruned by mutation or by preparation for final model.* E.g. to generally limit every iteration to exactly 1 features, one must set nfeatures_max=ngenes_max=1and remove_scored_0gain_genes_in_postprocessing_above_interpretability=0, but the genetic algorithmwill have a harder time finding good features. 
" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of engineered features (-1 = auto): . : Set the nfeatures max config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "nfeatures_max", + "output": "nfeatures max config.toml: Maximum features per model (and each model within the final model if ensemble) kept.Keeps top variable importance features, prunes rest away, after each scoring.Final ensemble will exclude any pruned-away features and only train on kept features,but may contain a few new features due to fitting on different data view (e.g. new clusters)Final scoring pipeline will exclude any pruned-away features,but may contain a few new features due to fitting on different data view (e.g. new clusters)-1 means no restrictions except internally-determined memory and interpretability restrictions.Notes:* If interpretability > remove_scored_0gain_genes_in_postprocessing_above_interpretability, thenevery GA iteration post-processes features down to this value just after scoring them. Otherwise,only mutations of scored individuals will be pruned (until the final model where limits are strictly applied).* If ngenes_max is not also limited, then some individuals will have more genes and features untilpruned by mutation or by preparation for final model.* E.g. to generally limit every iteration to exactly 1 features, one must set nfeatures_max=ngenes_max=1and remove_scored_0gain_genes_in_postprocessing_above_interpretability=0, but the genetic algorithmwill have a harder time finding good features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "nfeatures_max", + "output": "nfeatures max config.toml: Max. 
number of engineered features (-1 = auto): Maximum features per model (and each model within the final model if ensemble) kept.Keeps top variable importance features, prunes rest away, after each scoring.Final ensemble will exclude any pruned-away features and only train on kept features,but may contain a few new features due to fitting on different data view (e.g. new clusters)Final scoring pipeline will exclude any pruned-away features,but may contain a few new features due to fitting on different data view (e.g. new clusters)-1 means no restrictions except internally-determined memory and interpretability restrictions.Notes:* If interpretability > remove_scored_0gain_genes_in_postprocessing_above_interpretability, thenevery GA iteration post-processes features down to this value just after scoring them. Otherwise,only mutations of scored individuals will be pruned (until the final model where limits are strictly applied).* If ngenes_max is not also limited, then some individuals will have more genes and features untilpruned by mutation or by preparation for final model.* E.g. to generally limit every iteration to exactly 1 features, one must set nfeatures_max=ngenes_max=1and remove_scored_0gain_genes_in_postprocessing_above_interpretability=0, but the genetic algorithmwill have a harder time finding good features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "nfeatures max", + "output": "nfeatures max config.toml: Max. number of engineered features (-1 = auto): Maximum features per model (and each model within the final model if ensemble) kept.Keeps top variable importance features, prunes rest away, after each scoring.Final ensemble will exclude any pruned-away features and only train on kept features,but may contain a few new features due to fitting on different data view (e.g. 
new clusters)Final scoring pipeline will exclude any pruned-away features,but may contain a few new features due to fitting on different data view (e.g. new clusters)-1 means no restrictions except internally-determined memory and interpretability restrictions.Notes:* If interpretability > remove_scored_0gain_genes_in_postprocessing_above_interpretability, thenevery GA iteration post-processes features down to this value just after scoring them. Otherwise,only mutations of scored individuals will be pruned (until the final model where limits are strictly applied).* If ngenes_max is not also limited, then some individuals will have more genes and features untilpruned by mutation or by preparation for final model.* E.g. to generally limit every iteration to exactly 1 features, one must set nfeatures_max=ngenes_max=1and remove_scored_0gain_genes_in_postprocessing_above_interpretability=0, but the genetic algorithmwill have a harder time finding good features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of engineered features (-1 = auto): ", + "output": "nfeatures max config.toml: Max. number of engineered features (-1 = auto): Maximum features per model (and each model within the final model if ensemble) kept.Keeps top variable importance features, prunes rest away, after each scoring.Final ensemble will exclude any pruned-away features and only train on kept features,but may contain a few new features due to fitting on different data view (e.g. new clusters)Final scoring pipeline will exclude any pruned-away features,but may contain a few new features due to fitting on different data view (e.g. 
new clusters)-1 means no restrictions except internally-determined memory and interpretability restrictions.Notes:* If interpretability > remove_scored_0gain_genes_in_postprocessing_above_interpretability, thenevery GA iteration post-processes features down to this value just after scoring them. Otherwise,only mutations of scored individuals will be pruned (until the final model where limits are strictly applied).* If ngenes_max is not also limited, then some individuals will have more genes and features untilpruned by mutation or by preparation for final model.* E.g. to generally limit every iteration to exactly 1 features, one must set nfeatures_max=ngenes_max=1and remove_scored_0gain_genes_in_postprocessing_above_interpretability=0, but the genetic algorithmwill have a harder time finding good features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting nfeatures_max", + "output": "nfeatures max config.toml: Maximum features per model (and each model within the final model if ensemble) kept.Keeps top variable importance features, prunes rest away, after each scoring.Final ensemble will exclude any pruned-away features and only train on kept features,but may contain a few new features due to fitting on different data view (e.g. new clusters)Final scoring pipeline will exclude any pruned-away features,but may contain a few new features due to fitting on different data view (e.g. new clusters)-1 means no restrictions except internally-determined memory and interpretability restrictions.Notes:* If interpretability > remove_scored_0gain_genes_in_postprocessing_above_interpretability, thenevery GA iteration post-processes features down to this value just after scoring them. 
Otherwise,only mutations of scored individuals will be pruned (until the final model where limits are strictly applied).* If ngenes_max is not also limited, then some individuals will have more genes and features untilpruned by mutation or by preparation for final model.* E.g. to generally limit every iteration to exactly 1 features, one must set nfeatures_max=ngenes_max=1and remove_scored_0gain_genes_in_postprocessing_above_interpretability=0, but the genetic algorithmwill have a harder time finding good features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting nfeatures_max", + "output": "nfeatures max config.toml: Max. number of engineered features (-1 = auto): Maximum features per model (and each model within the final model if ensemble) kept.Keeps top variable importance features, prunes rest away, after each scoring.Final ensemble will exclude any pruned-away features and only train on kept features,but may contain a few new features due to fitting on different data view (e.g. new clusters)Final scoring pipeline will exclude any pruned-away features,but may contain a few new features due to fitting on different data view (e.g. new clusters)-1 means no restrictions except internally-determined memory and interpretability restrictions.Notes:* If interpretability > remove_scored_0gain_genes_in_postprocessing_above_interpretability, thenevery GA iteration post-processes features down to this value just after scoring them. Otherwise,only mutations of scored individuals will be pruned (until the final model where limits are strictly applied).* If ngenes_max is not also limited, then some individuals will have more genes and features untilpruned by mutation or by preparation for final model.* E.g. 
to generally limit every iteration to exactly 1 features, one must set nfeatures_max=ngenes_max=1and remove_scored_0gain_genes_in_postprocessing_above_interpretability=0, but the genetic algorithmwill have a harder time finding good features. " + }, + { + "prompt_type": "plain", + "instruction": ": What does ngenes_max do? : ngenes max config.toml: Maximum genes (transformer instances) per model (and each model within the final model if ensemble) kept.Controls number of genes before features are scored, so just randomly samples genes if pruning occurs.If restriction occurs after scoring features, then aggregated gene importances are used for pruning genes.Instances includes all possible transformers, including original transformer for numeric features.-1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "plain", + "instruction": ": Explain ngenes_max. : ngenes max config.toml: Maximum genes (transformer instances) per model (and each model within the final model if ensemble) kept.Controls number of genes before features are scored, so just randomly samples genes if pruning occurs.If restriction occurs after scoring features, then aggregated gene importances are used for pruning genes.Instances includes all possible transformers, including original transformer for numeric features.-1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of genes (transformer instances) (-1 = auto): . 
: Set the ngenes max config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ngenes_max", + "output": "ngenes max config.toml: Maximum genes (transformer instances) per model (and each model within the final model if ensemble) kept.Controls number of genes before features are scored, so just randomly samples genes if pruning occurs.If restriction occurs after scoring features, then aggregated gene importances are used for pruning genes.Instances includes all possible transformers, including original transformer for numeric features.-1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ngenes_max", + "output": "ngenes max config.toml: Max. number of genes (transformer instances) (-1 = auto): Maximum genes (transformer instances) per model (and each model within the final model if ensemble) kept.Controls number of genes before features are scored, so just randomly samples genes if pruning occurs.If restriction occurs after scoring features, then aggregated gene importances are used for pruning genes.Instances includes all possible transformers, including original transformer for numeric features.-1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ngenes max", + "output": "ngenes max config.toml: Max. 
number of genes (transformer instances) (-1 = auto): Maximum genes (transformer instances) per model (and each model within the final model if ensemble) kept.Controls number of genes before features are scored, so just randomly samples genes if pruning occurs.If restriction occurs after scoring features, then aggregated gene importances are used for pruning genes.Instances includes all possible transformers, including original transformer for numeric features.-1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of genes (transformer instances) (-1 = auto): ", + "output": "ngenes max config.toml: Max. number of genes (transformer instances) (-1 = auto): Maximum genes (transformer instances) per model (and each model within the final model if ensemble) kept.Controls number of genes before features are scored, so just randomly samples genes if pruning occurs.If restriction occurs after scoring features, then aggregated gene importances are used for pruning genes.Instances includes all possible transformers, including original transformer for numeric features.-1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ngenes_max", + "output": "ngenes max config.toml: Maximum genes (transformer instances) per model (and each model within the final model if ensemble) kept.Controls number of genes before features are scored, so just randomly samples genes if pruning occurs.If restriction occurs after scoring features, then aggregated gene importances are used for pruning genes.Instances includes all possible transformers, including original transformer for numeric features.-1 means no restrictions except internally-determined memory and 
interpretability restrictions " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ngenes_max", + "output": "ngenes max config.toml: Max. number of genes (transformer instances) (-1 = auto): Maximum genes (transformer instances) per model (and each model within the final model if ensemble) kept.Controls number of genes before features are scored, so just randomly samples genes if pruning occurs.If restriction occurs after scoring features, then aggregated gene importances are used for pruning genes.Instances includes all possible transformers, including original transformer for numeric features.-1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "plain", + "instruction": ": What does ngenes_min do? : ngenes min config.toml: Like ngenes_max but controls minimum number of genes.Useful when DAI by default is making too few genes but want many more.This can be useful when one has few input features, so DAI may remain conservative and not make many transformed features. But user knows that some transformed features may be useful.E.g. only target encoding transformer might have been chosen, and one wants DAI to explore many more possible input features at once." + }, + { + "prompt_type": "plain", + "instruction": ": Explain ngenes_min. : ngenes min config.toml: Like ngenes_max but controls minimum number of genes.Useful when DAI by default is making too few genes but want many more.This can be useful when one has few input features, so DAI may remain conservative and not make many transformed features. But user knows that some transformed features may be useful.E.g. only target encoding transformer might have been chosen, and one wants DAI to explore many more possible input features at once." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Min. number of genes (transformer instances) (-1 = auto): . 
: Set the ngenes min config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ngenes_min", + "output": "ngenes min config.toml: Like ngenes_max but controls minimum number of genes.Useful when DAI by default is making too few genes but want many more.This can be useful when one has few input features, so DAI may remain conservative and not make many transformed features. But user knows that some transformed features may be useful.E.g. only target encoding transformer might have been chosen, and one wants DAI to explore many more possible input features at once." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ngenes_min", + "output": "ngenes min config.toml: Min. number of genes (transformer instances) (-1 = auto): Like ngenes_max but controls minimum number of genes.Useful when DAI by default is making too few genes but want many more.This can be useful when one has few input features, so DAI may remain conservative and not make many transformed features. But user knows that some transformed features may be useful.E.g. only target encoding transformer might have been chosen, and one wants DAI to explore many more possible input features at once." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ngenes min", + "output": "ngenes min config.toml: Min. number of genes (transformer instances) (-1 = auto): Like ngenes_max but controls minimum number of genes.Useful when DAI by default is making too few genes but want many more.This can be useful when one has few input features, so DAI may remain conservative and not make many transformed features. But user knows that some transformed features may be useful.E.g. 
only target encoding transformer might have been chosen, and one wants DAI to explore many more possible input features at once." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Min. number of genes (transformer instances) (-1 = auto): ", + "output": "ngenes min config.toml: Min. number of genes (transformer instances) (-1 = auto): Like ngenes_max but controls minimum number of genes.Useful when DAI by default is making too few genes but want many more.This can be useful when one has few input features, so DAI may remain conservative and not make many transformed features. But user knows that some transformed features may be useful.E.g. only target encoding transformer might have been chosen, and one wants DAI to explore many more possible input features at once." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ngenes_min", + "output": "ngenes min config.toml: Like ngenes_max but controls minimum number of genes.Useful when DAI by default is making too few genes but want many more.This can be useful when one has few input features, so DAI may remain conservative and not make many transformed features. But user knows that some transformed features may be useful.E.g. only target encoding transformer might have been chosen, and one wants DAI to explore many more possible input features at once." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ngenes_min", + "output": "ngenes min config.toml: Min. number of genes (transformer instances) (-1 = auto): Like ngenes_max but controls minimum number of genes.Useful when DAI by default is making too few genes but want many more.This can be useful when one has few input features, so DAI may remain conservative and not make many transformed features. But user knows that some transformed features may be useful.E.g. 
only target encoding transformer might have been chosen, and one wants DAI to explore many more possible input features at once." + }, + { + "prompt_type": "plain", + "instruction": ": What does nfeatures_min do? : nfeatures min config.toml: Minimum genes (transformer instances) per model (and each model within the final model if ensemble) kept. Instances includes all possible transformers, including original transformer for numeric features. -1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "plain", + "instruction": ": Explain nfeatures_min. : nfeatures min config.toml: Minimum genes (transformer instances) per model (and each model within the final model if ensemble) kept. Instances includes all possible transformers, including original transformer for numeric features. -1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Min. number of genes (transformer instances) (-1 = auto): . : Set the nfeatures min config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "nfeatures_min", + "output": "nfeatures min config.toml: Minimum genes (transformer instances) per model (and each model within the final model if ensemble) kept. Instances includes all possible transformers, including original transformer for numeric features. -1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "nfeatures_min", + "output": "nfeatures min config.toml: Min. 
number of genes (transformer instances) (-1 = auto): Minimum genes (transformer instances) per model (and each model within the final model if ensemble) kept. Instances includes all possible transformers, including original transformer for numeric features. -1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "nfeatures min", + "output": "nfeatures min config.toml: Min. number of genes (transformer instances) (-1 = auto): Minimum genes (transformer instances) per model (and each model within the final model if ensemble) kept. Instances includes all possible transformers, including original transformer for numeric features. -1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Min. number of genes (transformer instances) (-1 = auto): ", + "output": "nfeatures min config.toml: Min. 
number of genes (transformer instances) (-1 = auto): Minimum genes (transformer instances) per model (and each model within the final model if ensemble) kept. Instances includes all possible transformers, including original transformer for numeric features. -1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting nfeatures_min", + "output": "nfeatures min config.toml: Minimum genes (transformer instances) per model (and each model within the final model if ensemble) kept. Instances includes all possible transformers, including original transformer for numeric features. -1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting nfeatures_min", + "output": "nfeatures min config.toml: Min. number of genes (transformer instances) (-1 = auto): Minimum genes (transformer instances) per model (and each model within the final model if ensemble) kept. Instances includes all possible transformers, including original transformer for numeric features. -1 means no restrictions except internally-determined memory and interpretability restrictions " + }, + { + "prompt_type": "plain", + "instruction": ": What does limit_features_by_interpretability do? : limit features by interpretability config.toml: Whether to limit feature counts by interpretability setting via features_allowed_by_interpretability" + }, + { + "prompt_type": "plain", + "instruction": ": Explain limit_features_by_interpretability. : limit features by interpretability config.toml: Whether to limit feature counts by interpretability setting via features_allowed_by_interpretability" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Limit features by interpretability: . 
: Set the limit features by interpretability config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "limit_features_by_interpretability", + "output": "limit features by interpretability config.toml: Whether to limit feature counts by interpretability setting via features_allowed_by_interpretability" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "limit_features_by_interpretability", + "output": "limit features by interpretability config.toml: Limit features by interpretability: Whether to limit feature counts by interpretability setting via features_allowed_by_interpretability" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "limit features by interpretability", + "output": "limit features by interpretability config.toml: Limit features by interpretability: Whether to limit feature counts by interpretability setting via features_allowed_by_interpretability" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Limit features by interpretability: ", + "output": "limit features by interpretability config.toml: Limit features by interpretability: Whether to limit feature counts by interpretability setting via features_allowed_by_interpretability" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting limit_features_by_interpretability", + "output": "limit features by interpretability config.toml: Whether to limit feature counts by interpretability setting via features_allowed_by_interpretability" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting limit_features_by_interpretability", + "output": "limit features by interpretability config.toml: Limit 
features by interpretability: Whether to limit feature counts by interpretability setting via features_allowed_by_interpretability" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_tensorflow_textcnn do? : enable tensorflow textcnn config.toml: Whether to use out-of-fold predictions of Word-based CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_tensorflow_textcnn. : enable tensorflow textcnn config.toml: Whether to use out-of-fold predictions of Word-based CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable word-based CNN TensorFlow transformers for NLP: . : Set the enable tensorflow textcnn config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_tensorflow_textcnn", + "output": "enable tensorflow textcnn config.toml: Whether to use out-of-fold predictions of Word-based CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_tensorflow_textcnn", + "output": "enable tensorflow textcnn config.toml: Enable word-based CNN TensorFlow transformers for NLP: Whether to use out-of-fold predictions of Word-based CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable tensorflow textcnn", + "output": "enable tensorflow textcnn config.toml: Enable word-based CNN TensorFlow transformers for NLP: Whether to use out-of-fold predictions of Word-based CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following 
expert setting for Driverless AI", + "input": "Enable word-based CNN TensorFlow transformers for NLP: ", + "output": "enable tensorflow textcnn config.toml: Enable word-based CNN TensorFlow transformers for NLP: Whether to use out-of-fold predictions of Word-based CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_tensorflow_textcnn", + "output": "enable tensorflow textcnn config.toml: Whether to use out-of-fold predictions of Word-based CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_tensorflow_textcnn", + "output": "enable tensorflow textcnn config.toml: Enable word-based CNN TensorFlow transformers for NLP: Whether to use out-of-fold predictions of Word-based CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_tensorflow_textbigru do? : enable tensorflow textbigru config.toml: Whether to use out-of-fold predictions of Word-based Bi-GRU TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_tensorflow_textbigru. : enable tensorflow textbigru config.toml: Whether to use out-of-fold predictions of Word-based Bi-GRU TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable word-based BiGRU TensorFlow transformers for NLP: . 
: Set the enable tensorflow textbigru config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_tensorflow_textbigru", + "output": "enable tensorflow textbigru config.toml: Whether to use out-of-fold predictions of Word-based Bi-GRU TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_tensorflow_textbigru", + "output": "enable tensorflow textbigru config.toml: Enable word-based BiGRU TensorFlow transformers for NLP: Whether to use out-of-fold predictions of Word-based Bi-GRU TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable tensorflow textbigru", + "output": "enable tensorflow textbigru config.toml: Enable word-based BiGRU TensorFlow transformers for NLP: Whether to use out-of-fold predictions of Word-based Bi-GRU TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable word-based BiGRU TensorFlow transformers for NLP: ", + "output": "enable tensorflow textbigru config.toml: Enable word-based BiGRU TensorFlow transformers for NLP: Whether to use out-of-fold predictions of Word-based Bi-GRU TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_tensorflow_textbigru", + "output": "enable tensorflow textbigru config.toml: Whether to use out-of-fold predictions of Word-based Bi-GRU TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of 
the expert setting enable_tensorflow_textbigru", + "output": "enable tensorflow textbigru config.toml: Enable word-based BiGRU TensorFlow transformers for NLP: Whether to use out-of-fold predictions of Word-based Bi-GRU TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_tensorflow_charcnn do? : enable tensorflow charcnn config.toml: Whether to use out-of-fold predictions of Character-level CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_tensorflow_charcnn. : enable tensorflow charcnn config.toml: Whether to use out-of-fold predictions of Character-level CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable character-based CNN TensorFlow transformers for NLP: . : Set the enable tensorflow charcnn config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_tensorflow_charcnn", + "output": "enable tensorflow charcnn config.toml: Whether to use out-of-fold predictions of Character-level CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_tensorflow_charcnn", + "output": "enable tensorflow charcnn config.toml: Enable character-based CNN TensorFlow transformers for NLP: Whether to use out-of-fold predictions of Character-level CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable tensorflow charcnn", + "output": "enable tensorflow charcnn config.toml: Enable character-based CNN TensorFlow transformers for NLP: Whether to use 
out-of-fold predictions of Character-level CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable character-based CNN TensorFlow transformers for NLP: ", + "output": "enable tensorflow charcnn config.toml: Enable character-based CNN TensorFlow transformers for NLP: Whether to use out-of-fold predictions of Character-level CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_tensorflow_charcnn", + "output": "enable tensorflow charcnn config.toml: Whether to use out-of-fold predictions of Character-level CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_tensorflow_charcnn", + "output": "enable tensorflow charcnn config.toml: Enable character-based CNN TensorFlow transformers for NLP: Whether to use out-of-fold predictions of Character-level CNN TensorFlow models as transformers for NLP if TensorFlow enabled" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_pytorch_nlp_transformer do? : enable pytorch nlp transformer config.toml: Whether to use pretrained PyTorch models as transformers for NLP tasks. Fits a linear model on top of pretrained embeddings. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. GPU(s) are highly recommended. Reduce string_col_as_text_min_relative_cardinality closer to 0.0 and string_col_as_text_threshold closer to 0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_pytorch_nlp_transformer. 
: enable pytorch nlp transformer config.toml: Whether to use pretrained PyTorch models as transformers for NLP tasks. Fits a linear model on top of pretrained embeddings. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. GPU(s) are highly recommended. Reduce string_col_as_text_min_relative_cardinality closer to 0.0 and string_col_as_text_threshold closer to 0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable PyTorch transformers for NLP: . : Set the enable pytorch nlp transformer config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_pytorch_nlp_transformer", + "output": "enable pytorch nlp transformer config.toml: Whether to use pretrained PyTorch models as transformers for NLP tasks. Fits a linear model on top of pretrained embeddings. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. GPU(s) are highly recommended. Reduce string_col_as_text_min_relative_cardinality closer to 0.0 and string_col_as_text_threshold closer to 0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_pytorch_nlp_transformer", + "output": "enable pytorch nlp transformer config.toml: Enable PyTorch transformers for NLP: Whether to use pretrained PyTorch models as transformers for NLP tasks. Fits a linear model on top of pretrained embeddings. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. GPU(s) are highly recommended. Reduce string_col_as_text_min_relative_cardinality closer to 0.0 and string_col_as_text_threshold closer to 0.0 to force string column to be treated as text despite low number of uniques." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable pytorch nlp transformer", + "output": "enable pytorch nlp transformer config.toml: Enable PyTorch transformers for NLP: Whether to use pretrained PyTorch models as transformers for NLP tasks. Fits a linear model on top of pretrained embeddings. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. GPU(s) are highly recommended. Reduce string_col_as_text_min_relative_cardinality closer to 0.0 and string_col_as_text_threshold closer to 0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable PyTorch transformers for NLP: ", + "output": "enable pytorch nlp transformer config.toml: Enable PyTorch transformers for NLP: Whether to use pretrained PyTorch models as transformers for NLP tasks. Fits a linear model on top of pretrained embeddings. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. GPU(s) are highly recommended. Reduce string_col_as_text_min_relative_cardinality closer to 0.0 and string_col_as_text_threshold closer to 0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_pytorch_nlp_transformer", + "output": "enable pytorch nlp transformer config.toml: Whether to use pretrained PyTorch models as transformers for NLP tasks. Fits a linear model on top of pretrained embeddings. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. 
GPU(s) are highly recommended. Reduce string_col_as_text_min_relative_cardinality closer to 0.0 and string_col_as_text_threshold closer to 0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_pytorch_nlp_transformer", + "output": "enable pytorch nlp transformer config.toml: Enable PyTorch transformers for NLP: Whether to use pretrained PyTorch models as transformers for NLP tasks. Fits a linear model on top of pretrained embeddings. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. GPU(s) are highly recommended. Reduce string_col_as_text_min_relative_cardinality closer to 0.0 and string_col_as_text_threshold closer to 0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "plain", + "instruction": ": What does pytorch_nlp_transformer_max_rows_linear_model do? : pytorch nlp transformer max rows linear model config.toml: More rows can slow down the fitting process. Recommended values are less than 100000." + }, + { + "prompt_type": "plain", + "instruction": ": Explain pytorch_nlp_transformer_max_rows_linear_model. : pytorch nlp transformer max rows linear model config.toml: More rows can slow down the fitting process. Recommended values are less than 100000." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max number of rows to use for fitting the linear model on top of the pretrained embeddings.: . : Set the pytorch nlp transformer max rows linear model config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_transformer_max_rows_linear_model", + "output": "pytorch nlp transformer max rows linear model config.toml: More rows can slow down the fitting process. Recommended values are less than 100000." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_transformer_max_rows_linear_model", + "output": "pytorch nlp transformer max rows linear model config.toml: Max number of rows to use for fitting the linear model on top of the pretrained embeddings.: More rows can slow down the fitting process. Recommended values are less than 100000." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch nlp transformer max rows linear model", + "output": "pytorch nlp transformer max rows linear model config.toml: Max number of rows to use for fitting the linear model on top of the pretrained embeddings.: More rows can slow down the fitting process. Recommended values are less than 100000." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max number of rows to use for fitting the linear model on top of the pretrained embeddings.: ", + "output": "pytorch nlp transformer max rows linear model config.toml: Max number of rows to use for fitting the linear model on top of the pretrained embeddings.: More rows can slow down the fitting process. Recommended values are less than 100000." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pytorch_nlp_transformer_max_rows_linear_model", + "output": "pytorch nlp transformer max rows linear model config.toml: More rows can slow down the fitting process. Recommended values are less than 100000." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pytorch_nlp_transformer_max_rows_linear_model", + "output": "pytorch nlp transformer max rows linear model config.toml: Max number of rows to use for fitting the linear model on top of the pretrained embeddings.: More rows can slow down the fitting process. Recommended values are less than 100000." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_pytorch_nlp_model do? : enable pytorch nlp model config.toml: Whether to use pretrained PyTorch models and fine-tune them for NLP tasks. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. These models are only using the first text column, and can be slow to train. GPU(s) are highly recommended. Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_pytorch_nlp_model. : enable pytorch nlp model config.toml: Whether to use pretrained PyTorch models and fine-tune them for NLP tasks. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. These models are only using the first text column, and can be slow to train. GPU(s) are highly recommended. Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable PyTorch models for NLP: . : Set the enable pytorch nlp model config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_pytorch_nlp_model", + "output": "enable pytorch nlp model config.toml: Whether to use pretrained PyTorch models and fine-tune them for NLP tasks. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. 
These models are only using the first text column, and can be slow to train. GPU(s) are highly recommended. Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_pytorch_nlp_model", + "output": "enable pytorch nlp model config.toml: Enable PyTorch models for NLP: Whether to use pretrained PyTorch models and fine-tune them for NLP tasks. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. These models are only using the first text column, and can be slow to train. GPU(s) are highly recommended. Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable pytorch nlp model", + "output": "enable pytorch nlp model config.toml: Enable PyTorch models for NLP: Whether to use pretrained PyTorch models and fine-tune them for NLP tasks. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. These models are only using the first text column, and can be slow to train. GPU(s) are highly recommended. Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable PyTorch models for NLP: ", + "output": "enable pytorch nlp model config.toml: Enable PyTorch models for NLP: Whether to use pretrained PyTorch models and fine-tune them for NLP tasks. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. These models are only using the first text column, and can be slow to train. 
GPU(s) are highly recommended. Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_pytorch_nlp_model", + "output": "enable pytorch nlp model config.toml: Whether to use pretrained PyTorch models and fine-tune them for NLP tasks. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. These models are only using the first text column, and can be slow to train. GPU(s) are highly recommended. Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_pytorch_nlp_model", + "output": "enable pytorch nlp model config.toml: Enable PyTorch models for NLP: Whether to use pretrained PyTorch models and fine-tune them for NLP tasks. Requires internet connection. Default of 'auto' means disabled. To enable, set to 'on'. These models are only using the first text column, and can be slow to train. GPU(s) are highly recommended. Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "plain", + "instruction": ": What does pytorch_nlp_pretrained_models do? : pytorch nlp pretrained models config.toml: Select which pretrained PyTorch NLP model(s) to use. Non-default ones might have no MOJO support. Requires internet connection. Only if PyTorch models or transformers for NLP are set to 'on'." + }, + { + "prompt_type": "plain", + "instruction": ": Explain pytorch_nlp_pretrained_models. : pytorch nlp pretrained models config.toml: Select which pretrained PyTorch NLP model(s) to use. Non-default ones might have no MOJO support. Requires internet connection. 
Only if PyTorch models or transformers for NLP are set to 'on'." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Select which pretrained PyTorch NLP model(s) to use.: . : Set the pytorch nlp pretrained models config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_pretrained_models", + "output": "pytorch nlp pretrained models config.toml: Select which pretrained PyTorch NLP model(s) to use. Non-default ones might have no MOJO support. Requires internet connection. Only if PyTorch models or transformers for NLP are set to 'on'." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_pretrained_models", + "output": "pytorch nlp pretrained models config.toml: Select which pretrained PyTorch NLP model(s) to use.: Select which pretrained PyTorch NLP model(s) to use. Non-default ones might have no MOJO support. Requires internet connection. Only if PyTorch models or transformers for NLP are set to 'on'." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch nlp pretrained models", + "output": "pytorch nlp pretrained models config.toml: Select which pretrained PyTorch NLP model(s) to use.: Select which pretrained PyTorch NLP model(s) to use. Non-default ones might have no MOJO support. Requires internet connection. Only if PyTorch models or transformers for NLP are set to 'on'." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Select which pretrained PyTorch NLP model(s) to use.: ", + "output": "pytorch nlp pretrained models config.toml: Select which pretrained PyTorch NLP model(s) to use.: Select which pretrained PyTorch NLP model(s) to use. Non-default ones might have no MOJO support. Requires internet connection. 
Only if PyTorch models or transformers for NLP are set to 'on'." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pytorch_nlp_pretrained_models", + "output": "pytorch nlp pretrained models config.toml: Select which pretrained PyTorch NLP model(s) to use. Non-default ones might have no MOJO support. Requires internet connection. Only if PyTorch models or transformers for NLP are set to 'on'." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pytorch_nlp_pretrained_models", + "output": "pytorch nlp pretrained models config.toml: Select which pretrained PyTorch NLP model(s) to use.: Select which pretrained PyTorch NLP model(s) to use. Non-default ones might have no MOJO support. Requires internet connection. Only if PyTorch models or transformers for NLP are set to 'on'." + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_max_epochs_nlp do? : tensorflow max epochs nlp config.toml: Max. number of epochs for TensorFlow models for making NLP features" + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_max_epochs_nlp. : tensorflow max epochs nlp config.toml: Max. number of epochs for TensorFlow models for making NLP features" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. TensorFlow epochs for NLP: . : Set the tensorflow max epochs nlp config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_max_epochs_nlp", + "output": "tensorflow max epochs nlp config.toml: Max. number of epochs for TensorFlow models for making NLP features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_max_epochs_nlp", + "output": "tensorflow max epochs nlp config.toml: Max. TensorFlow epochs for NLP: Max. 
number of epochs for TensorFlow models for making NLP features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow max epochs nlp", + "output": "tensorflow max epochs nlp config.toml: Max. TensorFlow epochs for NLP: Max. number of epochs for TensorFlow models for making NLP features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. TensorFlow epochs for NLP: ", + "output": "tensorflow max epochs nlp config.toml: Max. TensorFlow epochs for NLP: Max. number of epochs for TensorFlow models for making NLP features" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_max_epochs_nlp", + "output": "tensorflow max epochs nlp config.toml: Max. number of epochs for TensorFlow models for making NLP features" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_max_epochs_nlp", + "output": "tensorflow max epochs nlp config.toml: Max. TensorFlow epochs for NLP: Max. number of epochs for TensorFlow models for making NLP features" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_tensorflow_nlp_accuracy_switch do? : enable tensorflow nlp accuracy switch config.toml: Accuracy setting equal and above which will add all enabled TensorFlow NLP models below at start of experiment for text dominated problems when TensorFlow NLP transformers are set to auto. If set to on, this parameter is ignored. Otherwise, at lower accuracy, TensorFlow NLP transformations will only be created as a mutation. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_tensorflow_nlp_accuracy_switch. 
: enable tensorflow nlp accuracy switch config.toml: Accuracy setting equal and above which will add all enabled TensorFlow NLP models below at start of experiment for text dominated problems when TensorFlow NLP transformers are set to auto. If set to on, this parameter is ignored. Otherwise, at lower accuracy, TensorFlow NLP transformations will only be created as a mutation. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Accuracy above enable TensorFlow NLP by default for all models: . : Set the enable tensorflow nlp accuracy switch config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_tensorflow_nlp_accuracy_switch", + "output": "enable tensorflow nlp accuracy switch config.toml: Accuracy setting equal and above which will add all enabled TensorFlow NLP models below at start of experiment for text dominated problems when TensorFlow NLP transformers are set to auto. If set to on, this parameter is ignored. Otherwise, at lower accuracy, TensorFlow NLP transformations will only be created as a mutation. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_tensorflow_nlp_accuracy_switch", + "output": "enable tensorflow nlp accuracy switch config.toml: Accuracy above enable TensorFlow NLP by default for all models: Accuracy setting equal and above which will add all enabled TensorFlow NLP models below at start of experiment for text dominated problems when TensorFlow NLP transformers are set to auto. If set to on, this parameter is ignored. Otherwise, at lower accuracy, TensorFlow NLP transformations will only be created as a mutation. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable tensorflow nlp accuracy switch", + "output": "enable tensorflow nlp accuracy switch config.toml: Accuracy above enable TensorFlow NLP by default for all models: Accuracy setting equal and above which will add all enabled TensorFlow NLP models below at start of experiment for text dominated problems when TensorFlow NLP transformers are set to auto. If set to on, this parameter is ignored. Otherwise, at lower accuracy, TensorFlow NLP transformations will only be created as a mutation. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Accuracy above enable TensorFlow NLP by default for all models: ", + "output": "enable tensorflow nlp accuracy switch config.toml: Accuracy above enable TensorFlow NLP by default for all models: Accuracy setting equal and above which will add all enabled TensorFlow NLP models below at start of experiment for text dominated problems when TensorFlow NLP transformers are set to auto. If set to on, this parameter is ignored. Otherwise, at lower accuracy, TensorFlow NLP transformations will only be created as a mutation. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_tensorflow_nlp_accuracy_switch", + "output": "enable tensorflow nlp accuracy switch config.toml: Accuracy setting equal and above which will add all enabled TensorFlow NLP models below at start of experiment for text dominated problems when TensorFlow NLP transformers are set to auto. If set to on, this parameter is ignored. Otherwise, at lower accuracy, TensorFlow NLP transformations will only be created as a mutation. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_tensorflow_nlp_accuracy_switch", + "output": "enable tensorflow nlp accuracy switch config.toml: Accuracy above enable TensorFlow NLP by default for all models: Accuracy setting equal and above which will add all enabled TensorFlow NLP models below at start of experiment for text dominated problemswhen TensorFlow NLP transformers are set to auto. If set to on, this parameter is ignored.Otherwise, at lower accuracy, TensorFlow NLP transformations will only be created as a mutation. " + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_nlp_pretrained_embeddings_file_path do? : tensorflow nlp pretrained embeddings file path config.toml: Path to pretrained embeddings for TensorFlow NLP models, can be a path in local file system or an S3 location (s3://).For example, download and unzip https://nlp.stanford.edu/data/glove.6B.ziptensorflow_nlp_pretrained_embeddings_file_path = /path/on/server/to/glove.6B.300d.txt " + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_nlp_pretrained_embeddings_file_path. : tensorflow nlp pretrained embeddings file path config.toml: Path to pretrained embeddings for TensorFlow NLP models, can be a path in local file system or an S3 location (s3://).For example, download and unzip https://nlp.stanford.edu/data/glove.6B.ziptensorflow_nlp_pretrained_embeddings_file_path = /path/on/server/to/glove.6B.300d.txt " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Path to pretrained embeddings for TensorFlow NLP models. If empty, will train from scratch.: . 
: Set the tensorflow nlp pretrained embeddings file path config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_nlp_pretrained_embeddings_file_path", + "output": "tensorflow nlp pretrained embeddings file path config.toml: Path to pretrained embeddings for TensorFlow NLP models, can be a path in local file system or an S3 location (s3://).For example, download and unzip https://nlp.stanford.edu/data/glove.6B.ziptensorflow_nlp_pretrained_embeddings_file_path = /path/on/server/to/glove.6B.300d.txt " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_nlp_pretrained_embeddings_file_path", + "output": "tensorflow nlp pretrained embeddings file path config.toml: Path to pretrained embeddings for TensorFlow NLP models. If empty, will train from scratch.: Path to pretrained embeddings for TensorFlow NLP models, can be a path in local file system or an S3 location (s3://).For example, download and unzip https://nlp.stanford.edu/data/glove.6B.ziptensorflow_nlp_pretrained_embeddings_file_path = /path/on/server/to/glove.6B.300d.txt " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow nlp pretrained embeddings file path", + "output": "tensorflow nlp pretrained embeddings file path config.toml: Path to pretrained embeddings for TensorFlow NLP models. 
If empty, will train from scratch.: Path to pretrained embeddings for TensorFlow NLP models, can be a path in local file system or an S3 location (s3://).For example, download and unzip https://nlp.stanford.edu/data/glove.6B.ziptensorflow_nlp_pretrained_embeddings_file_path = /path/on/server/to/glove.6B.300d.txt " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Path to pretrained embeddings for TensorFlow NLP models. If empty, will train from scratch.: ", + "output": "tensorflow nlp pretrained embeddings file path config.toml: Path to pretrained embeddings for TensorFlow NLP models. If empty, will train from scratch.: Path to pretrained embeddings for TensorFlow NLP models, can be a path in local file system or an S3 location (s3://).For example, download and unzip https://nlp.stanford.edu/data/glove.6B.ziptensorflow_nlp_pretrained_embeddings_file_path = /path/on/server/to/glove.6B.300d.txt " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_nlp_pretrained_embeddings_file_path", + "output": "tensorflow nlp pretrained embeddings file path config.toml: Path to pretrained embeddings for TensorFlow NLP models, can be a path in local file system or an S3 location (s3://).For example, download and unzip https://nlp.stanford.edu/data/glove.6B.ziptensorflow_nlp_pretrained_embeddings_file_path = /path/on/server/to/glove.6B.300d.txt " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_nlp_pretrained_embeddings_file_path", + "output": "tensorflow nlp pretrained embeddings file path config.toml: Path to pretrained embeddings for TensorFlow NLP models. 
If empty, will train from scratch.: Path to pretrained embeddings for TensorFlow NLP models, can be a path in local file system or an S3 location (s3://).For example, download and unzip https://nlp.stanford.edu/data/glove.6B.ziptensorflow_nlp_pretrained_embeddings_file_path = /path/on/server/to/glove.6B.300d.txt " + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_nlp_pretrained_s3_access_key_id do? : tensorflow nlp pretrained s3 access key id config.toml: S3 access key Id to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_nlp_pretrained_s3_access_key_id. : tensorflow nlp pretrained s3 access key id config.toml: S3 access key Id to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_nlp_pretrained_s3_access_key_id", + "output": "tensorflow nlp pretrained s3 access key id config.toml: S3 access key Id to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_nlp_pretrained_s3_access_key_id", + "output": "tensorflow nlp pretrained s3 access key id config.toml: S3 access key Id to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow nlp pretrained s3 access key id", + "output": "tensorflow nlp pretrained s3 access key id config.toml: S3 access key Id to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for 
Driverless AI", + "input": "S3 access key Id to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: ", + "output": "tensorflow nlp pretrained s3 access key id config.toml: S3 access key Id to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_nlp_pretrained_s3_access_key_id", + "output": "tensorflow nlp pretrained s3 access key id config.toml: S3 access key Id to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_nlp_pretrained_s3_access_key_id", + "output": "tensorflow nlp pretrained s3 access key id config.toml: S3 access key Id to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_nlp_pretrained_s3_secret_access_key do? : tensorflow nlp pretrained s3 secret access key config.toml: S3 secret access key to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_nlp_pretrained_s3_secret_access_key. 
: tensorflow nlp pretrained s3 secret access key config.toml: S3 secret access key to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_nlp_pretrained_s3_secret_access_key", + "output": "tensorflow nlp pretrained s3 secret access key config.toml: S3 secret access key to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_nlp_pretrained_s3_secret_access_key", + "output": "tensorflow nlp pretrained s3 secret access key config.toml: S3 secret access key to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow nlp pretrained s3 secret access key", + "output": "tensorflow nlp pretrained s3 secret access key config.toml: S3 secret access key to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "S3 secret access key to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: ", + "output": "tensorflow nlp pretrained s3 secret access key config.toml: S3 secret access key to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_nlp_pretrained_s3_secret_access_key", + "output": "tensorflow nlp pretrained s3 secret access key config.toml: S3 secret access key to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + 
}, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_nlp_pretrained_s3_secret_access_key", + "output": "tensorflow nlp pretrained s3 secret access key config.toml: S3 secret access key to use when tensorflow_nlp_pretrained_embeddings_file_path is set to an S3 location.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_nlp_pretrained_embeddings_trainable do? : tensorflow nlp pretrained embeddings trainable config.toml: Allow training of all weights of the neural network graph, including the pretrained embedding layer weights. If disabled, then the embedding layer is frozen, but all other weights are still fine-tuned." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_nlp_pretrained_embeddings_trainable. : tensorflow nlp pretrained embeddings trainable config.toml: Allow training of all weights of the neural network graph, including the pretrained embedding layer weights. If disabled, then the embedding layer is frozen, but all other weights are still fine-tuned." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: For TensorFlow NLP, allow training of unfrozen pretrained embeddings (in addition to fine-tuning of the rest of the graph): . : Set the tensorflow nlp pretrained embeddings trainable config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_nlp_pretrained_embeddings_trainable", + "output": "tensorflow nlp pretrained embeddings trainable config.toml: Allow training of all weights of the neural network graph, including the pretrained embedding layer weights. If disabled, then the embedding layer is frozen, but all other weights are still fine-tuned." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_nlp_pretrained_embeddings_trainable", + "output": "tensorflow nlp pretrained embeddings trainable config.toml: For TensorFlow NLP, allow training of unfrozen pretrained embeddings (in addition to fine-tuning of the rest of the graph): Allow training of all weights of the neural network graph, including the pretrained embedding layer weights. If disabled, then the embedding layer is frozen, but all other weights are still fine-tuned." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow nlp pretrained embeddings trainable", + "output": "tensorflow nlp pretrained embeddings trainable config.toml: For TensorFlow NLP, allow training of unfrozen pretrained embeddings (in addition to fine-tuning of the rest of the graph): Allow training of all weights of the neural network graph, including the pretrained embedding layer weights. If disabled, then the embedding layer is frozen, but all other weights are still fine-tuned." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "For TensorFlow NLP, allow training of unfrozen pretrained embeddings (in addition to fine-tuning of the rest of the graph): ", + "output": "tensorflow nlp pretrained embeddings trainable config.toml: For TensorFlow NLP, allow training of unfrozen pretrained embeddings (in addition to fine-tuning of the rest of the graph): Allow training of all weights of the neural network graph, including the pretrained embedding layer weights. If disabled, then the embedding layer is frozen, but all other weights are still fine-tuned." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_nlp_pretrained_embeddings_trainable", + "output": "tensorflow nlp pretrained embeddings trainable config.toml: Allow training of all weights of the neural network graph, including the pretrained embedding layer weights. If disabled, then the embedding layer is frozen, but all other weights are still fine-tuned." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_nlp_pretrained_embeddings_trainable", + "output": "tensorflow nlp pretrained embeddings trainable config.toml: For TensorFlow NLP, allow training of unfrozen pretrained embeddings (in addition to fine-tuning of the rest of the graph): Allow training of all weights of the neural network graph, including the pretrained embedding layer weights. If disabled, then the embedding layer is frozen, but all other weights are still fine-tuned." + }, + { + "prompt_type": "plain", + "instruction": ": What does pytorch_tokenizer_parallel do? : pytorch tokenizer parallel config.toml: Whether to parallelize tokenization for BERT Models/Transformers." + }, + { + "prompt_type": "plain", + "instruction": ": Explain pytorch_tokenizer_parallel. : pytorch tokenizer parallel config.toml: Whether to parallelize tokenization for BERT Models/Transformers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_tokenizer_parallel", + "output": "pytorch tokenizer parallel config.toml: Whether to parallelize tokenization for BERT Models/Transformers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_tokenizer_parallel", + "output": "pytorch tokenizer parallel config.toml: Whether to parallelize tokenization for BERT Models/Transformers." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch tokenizer parallel", + "output": "pytorch tokenizer parallel config.toml: Whether to parallelize tokenization for BERT Models/Transformers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "pytorch tokenizer parallel config.toml: Whether to parallelize tokenization for BERT Models/Transformers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pytorch_tokenizer_parallel", + "output": "pytorch tokenizer parallel config.toml: Whether to parallelize tokenization for BERT Models/Transformers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pytorch_tokenizer_parallel", + "output": "pytorch tokenizer parallel config.toml: Whether to parallelize tokenization for BERT Models/Transformers." + }, + { + "prompt_type": "plain", + "instruction": ": What does pytorch_nlp_fine_tuning_num_epochs do? : pytorch nlp fine tuning num epochs config.toml: Number of epochs for fine-tuning of PyTorch NLP models. Larger values can increase accuracy but take longer to train." + }, + { + "prompt_type": "plain", + "instruction": ": Explain pytorch_nlp_fine_tuning_num_epochs. : pytorch nlp fine tuning num epochs config.toml: Number of epochs for fine-tuning of PyTorch NLP models. Larger values can increase accuracy but take longer to train." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of epochs for fine-tuning of PyTorch NLP models.: . 
: Set the pytorch nlp fine tuning num epochs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_fine_tuning_num_epochs", + "output": "pytorch nlp fine tuning num epochs config.toml: Number of epochs for fine-tuning of PyTorch NLP models. Larger values can increase accuracy but take longer to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_fine_tuning_num_epochs", + "output": "pytorch nlp fine tuning num epochs config.toml: Number of epochs for fine-tuning of PyTorch NLP models.: Number of epochs for fine-tuning of PyTorch NLP models. Larger values can increase accuracy but take longer to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch nlp fine tuning num epochs", + "output": "pytorch nlp fine tuning num epochs config.toml: Number of epochs for fine-tuning of PyTorch NLP models.: Number of epochs for fine-tuning of PyTorch NLP models. Larger values can increase accuracy but take longer to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of epochs for fine-tuning of PyTorch NLP models.: ", + "output": "pytorch nlp fine tuning num epochs config.toml: Number of epochs for fine-tuning of PyTorch NLP models.: Number of epochs for fine-tuning of PyTorch NLP models. Larger values can increase accuracy but take longer to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pytorch_nlp_fine_tuning_num_epochs", + "output": "pytorch nlp fine tuning num epochs config.toml: Number of epochs for fine-tuning of PyTorch NLP models. Larger values can increase accuracy but take longer to train." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pytorch_nlp_fine_tuning_num_epochs", + "output": "pytorch nlp fine tuning num epochs config.toml: Number of epochs for fine-tuning of PyTorch NLP models.: Number of epochs for fine-tuning of PyTorch NLP models. Larger values can increase accuracy but take longer to train." + }, + { + "prompt_type": "plain", + "instruction": ": What does pytorch_nlp_fine_tuning_batch_size do? : pytorch nlp fine tuning batch size config.toml: Batch size for PyTorch NLP models. Larger models and larger batch sizes will use more memory." + }, + { + "prompt_type": "plain", + "instruction": ": Explain pytorch_nlp_fine_tuning_batch_size. : pytorch nlp fine tuning batch size config.toml: Batch size for PyTorch NLP models. Larger models and larger batch sizes will use more memory." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Batch size for PyTorch NLP models. -1 for automatic.: . : Set the pytorch nlp fine tuning batch size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_fine_tuning_batch_size", + "output": "pytorch nlp fine tuning batch size config.toml: Batch size for PyTorch NLP models. Larger models and larger batch sizes will use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_fine_tuning_batch_size", + "output": "pytorch nlp fine tuning batch size config.toml: Batch size for PyTorch NLP models. -1 for automatic.: Batch size for PyTorch NLP models. Larger models and larger batch sizes will use more memory." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch nlp fine tuning batch size", + "output": "pytorch nlp fine tuning batch size config.toml: Batch size for PyTorch NLP models. -1 for automatic.: Batch size for PyTorch NLP models. Larger models and larger batch sizes will use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Batch size for PyTorch NLP models. -1 for automatic.: ", + "output": "pytorch nlp fine tuning batch size config.toml: Batch size for PyTorch NLP models. -1 for automatic.: Batch size for PyTorch NLP models. Larger models and larger batch sizes will use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pytorch_nlp_fine_tuning_batch_size", + "output": "pytorch nlp fine tuning batch size config.toml: Batch size for PyTorch NLP models. Larger models and larger batch sizes will use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pytorch_nlp_fine_tuning_batch_size", + "output": "pytorch nlp fine tuning batch size config.toml: Batch size for PyTorch NLP models. -1 for automatic.: Batch size for PyTorch NLP models. Larger models and larger batch sizes will use more memory." + }, + { + "prompt_type": "plain", + "instruction": ": What does pytorch_nlp_fine_tuning_padding_length do? : pytorch nlp fine tuning padding length config.toml: Maximum sequence length (padding length) for PyTorch NLP models. Larger models and larger padding lengths will use more memory." + }, + { + "prompt_type": "plain", + "instruction": ": Explain pytorch_nlp_fine_tuning_padding_length. : pytorch nlp fine tuning padding length config.toml: Maximum sequence length (padding length) for PyTorch NLP models. Larger models and larger padding lengths will use more memory." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum sequence length (padding length) for PyTorch NLP models. -1 for automatic.: . : Set the pytorch nlp fine tuning padding length config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_fine_tuning_padding_length", + "output": "pytorch nlp fine tuning padding length config.toml: Maximum sequence length (padding length) for PyTorch NLP models. Larger models and larger padding lengths will use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_fine_tuning_padding_length", + "output": "pytorch nlp fine tuning padding length config.toml: Maximum sequence length (padding length) for PyTorch NLP models. -1 for automatic.: Maximum sequence length (padding length) for PyTorch NLP models. Larger models and larger padding lengths will use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch nlp fine tuning padding length", + "output": "pytorch nlp fine tuning padding length config.toml: Maximum sequence length (padding length) for PyTorch NLP models. -1 for automatic.: Maximum sequence length (padding length) for PyTorch NLP models. Larger models and larger padding lengths will use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum sequence length (padding length) for PyTorch NLP models. -1 for automatic.: ", + "output": "pytorch nlp fine tuning padding length config.toml: Maximum sequence length (padding length) for PyTorch NLP models. -1 for automatic.: Maximum sequence length (padding length) for PyTorch NLP models. Larger models and larger padding lengths will use more memory." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pytorch_nlp_fine_tuning_padding_length", + "output": "pytorch nlp fine tuning padding length config.toml: Maximum sequence length (padding length) for PyTorch NLP models. Larger models and larger padding lengths will use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pytorch_nlp_fine_tuning_padding_length", + "output": "pytorch nlp fine tuning padding length config.toml: Maximum sequence length (padding length) for PyTorch NLP models. -1 for automatic.: Maximum sequence length (padding length) for PyTorch NLP models. Larger models and larger padding lengths will use more memory." + }, + { + "prompt_type": "plain", + "instruction": ": What does pytorch_nlp_pretrained_models_dir do? : pytorch nlp pretrained models dir config.toml: Path to pretrained PyTorch NLP models. Note that this can be either a path in the local file system(/path/on/server/to/bert_models_folder), an URL or a S3 location (s3://).To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/bert_models.zipand unzip and store it in a directory on the instance where DAI is installed.``pytorch_nlp_pretrained_models_dir=/path/on/server/to/bert_models_folder`` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain pytorch_nlp_pretrained_models_dir. : pytorch nlp pretrained models dir config.toml: Path to pretrained PyTorch NLP models. 
Note that this can be either a path in the local file system(/path/on/server/to/bert_models_folder), an URL or a S3 location (s3://).To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/bert_models.zipand unzip and store it in a directory on the instance where DAI is installed.``pytorch_nlp_pretrained_models_dir=/path/on/server/to/bert_models_folder`` " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Path to pretrained PyTorch NLP models. If empty, will get models from S3: . : Set the pytorch nlp pretrained models dir config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_pretrained_models_dir", + "output": "pytorch nlp pretrained models dir config.toml: Path to pretrained PyTorch NLP models. Note that this can be either a path in the local file system(/path/on/server/to/bert_models_folder), an URL or a S3 location (s3://).To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/bert_models.zipand unzip and store it in a directory on the instance where DAI is installed.``pytorch_nlp_pretrained_models_dir=/path/on/server/to/bert_models_folder`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_pretrained_models_dir", + "output": "pytorch nlp pretrained models dir config.toml: Path to pretrained PyTorch NLP models. If empty, will get models from S3: Path to pretrained PyTorch NLP models. 
Note that this can be either a path in the local file system (/path/on/server/to/bert_models_folder), a URL or an S3 location (s3://). To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/bert_models.zip and unzip and store it in a directory on the instance where DAI is installed. ``pytorch_nlp_pretrained_models_dir=/path/on/server/to/bert_models_folder`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch nlp pretrained models dir", + "output": "pytorch nlp pretrained models dir config.toml: Path to pretrained PyTorch NLP models. If empty, will get models from S3: Path to pretrained PyTorch NLP models. Note that this can be either a path in the local file system (/path/on/server/to/bert_models_folder), a URL or an S3 location (s3://). To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/bert_models.zip and unzip and store it in a directory on the instance where DAI is installed. ``pytorch_nlp_pretrained_models_dir=/path/on/server/to/bert_models_folder`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Path to pretrained PyTorch NLP models. If empty, will get models from S3: ", + "output": "pytorch nlp pretrained models dir config.toml: Path to pretrained PyTorch NLP models. If empty, will get models from S3: Path to pretrained PyTorch NLP models. 
Note that this can be either a path in the local file system (/path/on/server/to/bert_models_folder), a URL or an S3 location (s3://). To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/bert_models.zip and unzip and store it in a directory on the instance where DAI is installed. ``pytorch_nlp_pretrained_models_dir=/path/on/server/to/bert_models_folder`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pytorch_nlp_pretrained_models_dir", + "output": "pytorch nlp pretrained models dir config.toml: Path to pretrained PyTorch NLP models. Note that this can be either a path in the local file system (/path/on/server/to/bert_models_folder), a URL or an S3 location (s3://). To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/bert_models.zip and unzip and store it in a directory on the instance where DAI is installed. ``pytorch_nlp_pretrained_models_dir=/path/on/server/to/bert_models_folder`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pytorch_nlp_pretrained_models_dir", + "output": "pytorch nlp pretrained models dir config.toml: Path to pretrained PyTorch NLP models. If empty, will get models from S3: Path to pretrained PyTorch NLP models. Note that this can be either a path in the local file system (/path/on/server/to/bert_models_folder), a URL or an S3 location (s3://). To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/bert_models.zip and unzip and store it in a directory on the instance where DAI is installed. ``pytorch_nlp_pretrained_models_dir=/path/on/server/to/bert_models_folder`` " + }, + { + "prompt_type": "plain", + "instruction": ": What does pytorch_nlp_pretrained_s3_access_key_id do? 
: pytorch nlp pretrained s3 access key id config.toml: S3 access key Id to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain pytorch_nlp_pretrained_s3_access_key_id. : pytorch nlp pretrained s3 access key id config.toml: S3 access key Id to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_pretrained_s3_access_key_id", + "output": "pytorch nlp pretrained s3 access key id config.toml: S3 access key Id to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_pretrained_s3_access_key_id", + "output": "pytorch nlp pretrained s3 access key id config.toml: S3 access key Id to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch nlp pretrained s3 access key id", + "output": "pytorch nlp pretrained s3 access key id config.toml: S3 access key Id to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "S3 access key Id to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: ", + "output": "pytorch nlp pretrained s3 access key id config.toml: S3 access key Id to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pytorch_nlp_pretrained_s3_access_key_id", + "output": "pytorch nlp pretrained s3 access key id config.toml: S3 access key Id to use when 
pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pytorch_nlp_pretrained_s3_access_key_id", + "output": "pytorch nlp pretrained s3 access key id config.toml: S3 access key Id to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does pytorch_nlp_pretrained_s3_secret_access_key do? : pytorch nlp pretrained s3 secret access key config.toml: S3 secret access key to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain pytorch_nlp_pretrained_s3_secret_access_key. : pytorch nlp pretrained s3 secret access key config.toml: S3 secret access key to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_pretrained_s3_secret_access_key", + "output": "pytorch nlp pretrained s3 secret access key config.toml: S3 secret access key to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch_nlp_pretrained_s3_secret_access_key", + "output": "pytorch nlp pretrained s3 secret access key config.toml: S3 secret access key to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pytorch nlp pretrained s3 secret access key", + "output": "pytorch nlp pretrained s3 secret access key config.toml: S3 secret access key to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following 
expert setting for Driverless AI", + "input": "S3 secret access key to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: ", + "output": "pytorch nlp pretrained s3 secret access key config.toml: S3 secret access key to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pytorch_nlp_pretrained_s3_secret_access_key", + "output": "pytorch nlp pretrained s3 secret access key config.toml: S3 secret access key to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pytorch_nlp_pretrained_s3_secret_access_key", + "output": "pytorch nlp pretrained s3 secret access key config.toml: S3 secret access key to use when pytorch_nlp_pretrained_models_dir is set to an S3 location.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does text_fraction_for_text_dominated_problem do? : text fraction for text dominated problem config.toml: Fraction of text columns out of all features to be considered a text-dominated problem" + }, + { + "prompt_type": "plain", + "instruction": ": Explain text_fraction_for_text_dominated_problem. : text fraction for text dominated problem config.toml: Fraction of text columns out of all features to be considered a text-dominated problem" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Fraction of text columns out of all features to be considered a text-dominated problem: . 
: Set the text fraction for text dominated problem config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text_fraction_for_text_dominated_problem", + "output": "text fraction for text dominated problem config.toml: Fraction of text columns out of all features to be considered a text-dominated problem" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text_fraction_for_text_dominated_problem", + "output": "text fraction for text dominated problem config.toml: Fraction of text columns out of all features to be considered a text-dominated problem: Fraction of text columns out of all features to be considered a text-dominated problem" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text fraction for text dominated problem", + "output": "text fraction for text dominated problem config.toml: Fraction of text columns out of all features to be considered a text-dominated problem: Fraction of text columns out of all features to be considered a text-dominated problem" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Fraction of text columns out of all features to be considered a text-dominated problem: ", + "output": "text fraction for text dominated problem config.toml: Fraction of text columns out of all features to be considered a text-dominated problem: Fraction of text columns out of all features to be considered a text-dominated problem" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting text_fraction_for_text_dominated_problem", + "output": "text fraction for text dominated problem config.toml: Fraction of text columns out of all features to be considered a text-dominated problem" + }, + { + 
"prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting text_fraction_for_text_dominated_problem", + "output": "text fraction for text dominated problem config.toml: Fraction of text columns out of all features to be considered a text-dominated problem: Fraction of text columns out of all features to be considered a text-dominated problem" + }, + { + "prompt_type": "plain", + "instruction": ": What does text_transformer_fraction_for_text_dominated_problem do? : text transformer fraction for text dominated problem config.toml: Fraction of text transformers to all transformers above which to trigger that text dominated problem" + }, + { + "prompt_type": "plain", + "instruction": ": Explain text_transformer_fraction_for_text_dominated_problem. : text transformer fraction for text dominated problem config.toml: Fraction of text transformers to all transformers above which to trigger that text dominated problem" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Fraction of text per all transformers to trigger that text dominated: . 
: Set the text transformer fraction for text dominated problem config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text_transformer_fraction_for_text_dominated_problem", + "output": "text transformer fraction for text dominated problem config.toml: Fraction of text transformers to all transformers above which to trigger that text dominated problem" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text_transformer_fraction_for_text_dominated_problem", + "output": "text transformer fraction for text dominated problem config.toml: Fraction of text per all transformers to trigger that text dominated: Fraction of text transformers to all transformers above which to trigger that text dominated problem" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text transformer fraction for text dominated problem", + "output": "text transformer fraction for text dominated problem config.toml: Fraction of text per all transformers to trigger that text dominated: Fraction of text transformers to all transformers above which to trigger that text dominated problem" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Fraction of text per all transformers to trigger that text dominated: ", + "output": "text transformer fraction for text dominated problem config.toml: Fraction of text per all transformers to trigger that text dominated: Fraction of text transformers to all transformers above which to trigger that text dominated problem" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting text_transformer_fraction_for_text_dominated_problem", + "output": "text transformer fraction for text dominated problem config.toml: 
Fraction of text transformers to all transformers above which to trigger that text dominated problem" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting text_transformer_fraction_for_text_dominated_problem", + "output": "text transformer fraction for text dominated problem config.toml: Fraction of text per all transformers to trigger that text dominated: Fraction of text transformers to all transformers above which to trigger that text dominated problem" + }, + { + "prompt_type": "plain", + "instruction": ": What does text_dominated_limit_tuning do? : text dominated limit tuning config.toml: Whether to reduce options for text-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain text_dominated_limit_tuning. : text dominated limit tuning config.toml: Whether to reduce options for text-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text_dominated_limit_tuning", + "output": "text dominated limit tuning config.toml: Whether to reduce options for text-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text_dominated_limit_tuning", + "output": "text dominated limit tuning config.toml: Whether to reduce options for text-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text dominated limit tuning", + "output": "text dominated limit tuning config.toml: Whether to reduce options for text-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "text dominated limit tuning config.toml: Whether to reduce options for text-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting text_dominated_limit_tuning", + "output": "text dominated limit tuning config.toml: Whether to reduce options for text-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting text_dominated_limit_tuning", + "output": "text dominated limit tuning config.toml: Whether to reduce options for text-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." + }, + { + "prompt_type": "plain", + "instruction": ": What does image_dominated_limit_tuning do? : image dominated limit tuning config.toml: Whether to reduce options for image-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain image_dominated_limit_tuning. 
: image dominated limit tuning config.toml: Whether to reduce options for image-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_dominated_limit_tuning", + "output": "image dominated limit tuning config.toml: Whether to reduce options for image-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_dominated_limit_tuning", + "output": "image dominated limit tuning config.toml: Whether to reduce options for image-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image dominated limit tuning", + "output": "image dominated limit tuning config.toml: Whether to reduce options for image-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "image dominated limit tuning config.toml: Whether to reduce options for image-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting image_dominated_limit_tuning", + "output": "image dominated limit tuning config.toml: Whether to reduce options for image-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting image_dominated_limit_tuning", + "output": "image dominated limit tuning config.toml: Whether to reduce options for image-dominated models to reduce expense, e.g. disable ensemble, disable genetic algorithm, single identity target encoder for classification, etc." + }, + { + "prompt_type": "plain", + "instruction": ": What does string_col_as_text_threshold do? : string col as text threshold config.toml: Threshold for average string-is-text score as determined by internal heuristicsIt decides when a string column will be treated as text (for an NLP problem) or just asa standard categorical variable.Higher values will favor string columns as categoricals, lower values will favor string columns as text.Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "plain", + "instruction": ": Explain string_col_as_text_threshold. : string col as text threshold config.toml: Threshold for average string-is-text score as determined by internal heuristicsIt decides when a string column will be treated as text (for an NLP problem) or just asa standard categorical variable.Higher values will favor string columns as categoricals, lower values will favor string columns as text.Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Threshold for string columns to be treated as text (0.0 - text, 1.0 - string): . : Set the string col as text threshold config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string_col_as_text_threshold", + "output": "string col as text threshold config.toml: Threshold for average string-is-text score as determined by internal heuristicsIt decides when a string column will be treated as text (for an NLP problem) or just asa standard categorical variable.Higher values will favor string columns as categoricals, lower values will favor string columns as text.Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string_col_as_text_threshold", + "output": "string col as text threshold config.toml: Threshold for string columns to be treated as text (0.0 - text, 1.0 - string): Threshold for average string-is-text score as determined by internal heuristicsIt decides when a string column will be treated as text (for an NLP problem) or just asa standard categorical variable.Higher values will favor string columns as categoricals, lower values will favor string columns as text.Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string col as text threshold", + "output": "string col as text threshold config.toml: Threshold for string columns to be treated as text (0.0 - text, 1.0 - string): Threshold for average string-is-text score as determined by internal heuristicsIt decides when a string column will be treated as text (for an NLP problem) or just asa standard categorical variable.Higher values will favor string columns as categoricals, lower values will favor string columns as text.Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Threshold for string columns to be treated as text (0.0 - text, 1.0 - string): ", + "output": "string col as text threshold config.toml: Threshold for string columns to be treated as text (0.0 - text, 1.0 - string): Threshold for average string-is-text score as determined by internal heuristicsIt decides when a string column will be treated as text (for an NLP problem) or just asa standard categorical variable.Higher values will favor string columns as categoricals, lower values will favor string columns as text.Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting string_col_as_text_threshold", + "output": "string col as text threshold config.toml: Threshold for average string-is-text score as determined by internal heuristicsIt decides when a string column will be treated as text (for an NLP problem) or just asa standard categorical variable.Higher values will favor string columns as categoricals, lower values will favor string columns as text.Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting string_col_as_text_threshold", + "output": "string col as text threshold config.toml: Threshold for string columns to be treated as text (0.0 - text, 1.0 - string): Threshold for average string-is-text score as determined by internal heuristicsIt decides when a string column will be treated as text (for an NLP problem) or just asa standard categorical variable.Higher values will favor string columns as categoricals, lower values will favor string columns as text.Set string_col_as_text_min_relative_cardinality=0.0 to force string column to be treated as text despite low number of uniques." + }, + { + "prompt_type": "plain", + "instruction": ": What does string_col_as_text_threshold_preview do? : string col as text threshold preview config.toml: Threshold for string columns to be treated as text during preview - should be less than string_col_as_text_threshold to allow data with first 20 rows that don't look like text to still work for Text-only transformers (0.0 - text, 1.0 - string)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain string_col_as_text_threshold_preview. 
: string col as text threshold preview config.toml: Threshold for string columns to be treated as text during preview - should be less than string_col_as_text_threshold to allow data with first 20 rows that don't look like text to still work for Text-only transformers (0.0 - text, 1.0 - string)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string_col_as_text_threshold_preview", + "output": "string col as text threshold preview config.toml: Threshold for string columns to be treated as text during preview - should be less than string_col_as_text_threshold to allow data with first 20 rows that don't look like text to still work for Text-only transformers (0.0 - text, 1.0 - string)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string_col_as_text_threshold_preview", + "output": "string col as text threshold preview config.toml: Threshold for string columns to be treated as text during preview - should be less than string_col_as_text_threshold to allow data with first 20 rows that don't look like text to still work for Text-only transformers (0.0 - text, 1.0 - string)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string col as text threshold preview", + "output": "string col as text threshold preview config.toml: Threshold for string columns to be treated as text during preview - should be less than string_col_as_text_threshold to allow data with first 20 rows that don't look like text to still work for Text-only transformers (0.0 - text, 1.0 - string)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "string col as text threshold preview config.toml: Threshold for string columns to be treated as text during preview - should be less than 
string_col_as_text_threshold to allow data with first 20 rows that don't look like text to still work for Text-only transformers (0.0 - text, 1.0 - string)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting string_col_as_text_threshold_preview", + "output": "string col as text threshold preview config.toml: Threshold for string columns to be treated as text during preview - should be less than string_col_as_text_threshold to allow data with first 20 rows that don't look like text to still work for Text-only transformers (0.0 - text, 1.0 - string)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting string_col_as_text_threshold_preview", + "output": "string col as text threshold preview config.toml: Threshold for string columns to be treated as text during preview - should be less than string_col_as_text_threshold to allow data with first 20 rows that don't look like text to still work for Text-only transformers (0.0 - text, 1.0 - string)" + }, + { + "prompt_type": "plain", + "instruction": ": What does string_col_as_text_min_relative_cardinality do? : string col as text min relative cardinality config.toml: Minimum fraction of unique values for string columns to be considered as possible text (otherwise categorical)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain string_col_as_text_min_relative_cardinality. 
: string col as text min relative cardinality config.toml: Minimum fraction of unique values for string columns to be considered as possible text (otherwise categorical)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string_col_as_text_min_relative_cardinality", + "output": "string col as text min relative cardinality config.toml: Minimum fraction of unique values for string columns to be considered as possible text (otherwise categorical)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string_col_as_text_min_relative_cardinality", + "output": "string col as text min relative cardinality config.toml: Minimum fraction of unique values for string columns to be considered as possible text (otherwise categorical)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string col as text min relative cardinality", + "output": "string col as text min relative cardinality config.toml: Minimum fraction of unique values for string columns to be considered as possible text (otherwise categorical)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "string col as text min relative cardinality config.toml: Minimum fraction of unique values for string columns to be considered as possible text (otherwise categorical)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting string_col_as_text_min_relative_cardinality", + "output": "string col as text min relative cardinality config.toml: Minimum fraction of unique values for string columns to be considered as possible text (otherwise categorical)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting 
string_col_as_text_min_relative_cardinality", + "output": "string col as text min relative cardinality config.toml: Minimum fraction of unique values for string columns to be considered as possible text (otherwise categorical)" + }, + { + "prompt_type": "plain", + "instruction": ": What does string_col_as_text_min_absolute_cardinality do? : string col as text min absolute cardinality config.toml: Minimum number of uniques for string columns to be considered as possible text (if not already)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain string_col_as_text_min_absolute_cardinality. : string col as text min absolute cardinality config.toml: Minimum number of uniques for string columns to be considered as possible text (if not already)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string_col_as_text_min_absolute_cardinality", + "output": "string col as text min absolute cardinality config.toml: Minimum number of uniques for string columns to be considered as possible text (if not already)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string_col_as_text_min_absolute_cardinality", + "output": "string col as text min absolute cardinality config.toml: Minimum number of uniques for string columns to be considered as possible text (if not already)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string col as text min absolute cardinality", + "output": "string col as text min absolute cardinality config.toml: Minimum number of uniques for string columns to be considered as possible text (if not already)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "string col as text min absolute cardinality config.toml: Minimum number of 
uniques for string columns to be considered as possible text (if not already)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting string_col_as_text_min_absolute_cardinality", + "output": "string col as text min absolute cardinality config.toml: Minimum number of uniques for string columns to be considered as possible text (if not already)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting string_col_as_text_min_absolute_cardinality", + "output": "string col as text min absolute cardinality config.toml: Minimum number of uniques for string columns to be considered as possible text (if not already)" + }, + { + "prompt_type": "plain", + "instruction": ": What does tokenize_single_chars do? : tokenize single chars config.toml: If disabled, require 2 or more alphanumeric characters for a token in Text (Count and TF/IDF) transformers, otherwise create tokens out of single alphanumeric characters. True means that 'Street 3' is tokenized into 'Street' and '3', while False means that it's tokenized into 'Street'." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tokenize_single_chars. : tokenize single chars config.toml: If disabled, require 2 or more alphanumeric characters for a token in Text (Count and TF/IDF) transformers, otherwise create tokens out of single alphanumeric characters. True means that 'Street 3' is tokenized into 'Street' and '3', while False means that it's tokenized into 'Street'." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Tokenize single characters.: . 
: Set the tokenize single chars config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tokenize_single_chars", + "output": "tokenize single chars config.toml: If disabled, require 2 or more alphanumeric characters for a token in Text (Count and TF/IDF) transformers, otherwise create tokens out of single alphanumeric characters. True means that 'Street 3' is tokenized into 'Street' and '3', while False means that it's tokenized into 'Street'." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tokenize_single_chars", + "output": "tokenize single chars config.toml: Tokenize single characters.: If disabled, require 2 or more alphanumeric characters for a token in Text (Count and TF/IDF) transformers, otherwise create tokens out of single alphanumeric characters. True means that 'Street 3' is tokenized into 'Street' and '3', while False means that it's tokenized into 'Street'." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tokenize single chars", + "output": "tokenize single chars config.toml: Tokenize single characters.: If disabled, require 2 or more alphanumeric characters for a token in Text (Count and TF/IDF) transformers, otherwise create tokens out of single alphanumeric characters. True means that 'Street 3' is tokenized into 'Street' and '3', while False means that it's tokenized into 'Street'." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Tokenize single characters.: ", + "output": "tokenize single chars config.toml: Tokenize single characters.: If disabled, require 2 or more alphanumeric characters for a token in Text (Count and TF/IDF) transformers, otherwise create tokens out of single alphanumeric characters. 
True means that 'Street 3' is tokenized into 'Street' and '3', while False means that it's tokenized into 'Street'." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tokenize_single_chars", + "output": "tokenize single chars config.toml: If disabled, require 2 or more alphanumeric characters for a token in Text (Count and TF/IDF) transformers, otherwise create tokens out of single alphanumeric characters. True means that 'Street 3' is tokenized into 'Street' and '3', while False means that it's tokenized into 'Street'." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tokenize_single_chars", + "output": "tokenize single chars config.toml: Tokenize single characters.: If disabled, require 2 or more alphanumeric characters for a token in Text (Count and TF/IDF) transformers, otherwise create tokens out of single alphanumeric characters. True means that 'Street 3' is tokenized into 'Street' and '3', while False means that it's tokenized into 'Street'." + }, + { + "prompt_type": "plain", + "instruction": ": What does supported_image_types do? : supported image types config.toml: Supported image types. URIs with these endings will be considered as image paths (local or remote)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain supported_image_types. : supported image types config.toml: Supported image types. URIs with these endings will be considered as image paths (local or remote)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "supported_image_types", + "output": "supported image types config.toml: Supported image types. URIs with these endings will be considered as image paths (local or remote)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "supported_image_types", + "output": "supported image types config.toml: Supported image types. URIs with these endings will be considered as image paths (local or remote)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "supported image types", + "output": "supported image types config.toml: Supported image types. URIs with these endings will be considered as image paths (local or remote)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "supported image types config.toml: Supported image types. URIs with these endings will be considered as image paths (local or remote)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting supported_image_types", + "output": "supported image types config.toml: Supported image types. URIs with these endings will be considered as image paths (local or remote)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting supported_image_types", + "output": "supported image types config.toml: Supported image types. URIs with these endings will be considered as image paths (local or remote)." + }, + { + "prompt_type": "plain", + "instruction": ": What does image_paths_absolute do? : image paths absolute config.toml: Whether to create absolute paths for images when importing datasets containing images. Can facilitate testing or re-use of frames for scoring." + }, + { + "prompt_type": "plain", + "instruction": ": Explain image_paths_absolute. : image paths absolute config.toml: Whether to create absolute paths for images when importing datasets containing images. Can facilitate testing or re-use of frames for scoring." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_paths_absolute", + "output": "image paths absolute config.toml: Whether to create absolute paths for images when importing datasets containing images. Can facilitate testing or re-use of frames for scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_paths_absolute", + "output": "image paths absolute config.toml: Whether to create absolute paths for images when importing datasets containing images. Can facilitate testing or re-use of frames for scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image paths absolute", + "output": "image paths absolute config.toml: Whether to create absolute paths for images when importing datasets containing images. Can facilitate testing or re-use of frames for scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "image paths absolute config.toml: Whether to create absolute paths for images when importing datasets containing images. Can facilitate testing or re-use of frames for scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting image_paths_absolute", + "output": "image paths absolute config.toml: Whether to create absolute paths for images when importing datasets containing images. Can facilitate testing or re-use of frames for scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting image_paths_absolute", + "output": "image paths absolute config.toml: Whether to create absolute paths for images when importing datasets containing images. Can facilitate testing or re-use of frames for scoring." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does enable_tensorflow_image do? : enable tensorflow image config.toml: Whether to use pretrained deep learning models for processing of image data as part of the feature engineering pipeline. A column of URIs to images (jpg, png, etc.) will be converted to a numeric representation using ImageNet-pretrained deep learning models. If no GPUs are found, then must be set to 'on' to enable." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_tensorflow_image. : enable tensorflow image config.toml: Whether to use pretrained deep learning models for processing of image data as part of the feature engineering pipeline. A column of URIs to images (jpg, png, etc.) will be converted to a numeric representation using ImageNet-pretrained deep learning models. If no GPUs are found, then must be set to 'on' to enable." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable Image Transformer for processing of image data: . : Set the enable tensorflow image config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_tensorflow_image", + "output": "enable tensorflow image config.toml: Whether to use pretrained deep learning models for processing of image data as part of the feature engineering pipeline. A column of URIs to images (jpg, png, etc.) will be converted to a numeric representation using ImageNet-pretrained deep learning models. If no GPUs are found, then must be set to 'on' to enable." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_tensorflow_image", + "output": "enable tensorflow image config.toml: Enable Image Transformer for processing of image data: Whether to use pretrained deep learning models for processing of image data as part of the feature engineering pipeline. 
A column of URIs to images (jpg, png, etc.) will be converted to a numeric representation using ImageNet-pretrained deep learning models. If no GPUs are found, then must be set to 'on' to enable." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable tensorflow image", + "output": "enable tensorflow image config.toml: Enable Image Transformer for processing of image data: Whether to use pretrained deep learning models for processing of image data as part of the feature engineering pipeline. A column of URIs to images (jpg, png, etc.) will be converted to a numeric representation using ImageNet-pretrained deep learning models. If no GPUs are found, then must be set to 'on' to enable." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable Image Transformer for processing of image data: ", + "output": "enable tensorflow image config.toml: Enable Image Transformer for processing of image data: Whether to use pretrained deep learning models for processing of image data as part of the feature engineering pipeline. A column of URIs to images (jpg, png, etc.) will be converted to a numeric representation using ImageNet-pretrained deep learning models. If no GPUs are found, then must be set to 'on' to enable." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_tensorflow_image", + "output": "enable tensorflow image config.toml: Whether to use pretrained deep learning models for processing of image data as part of the feature engineering pipeline. A column of URIs to images (jpg, png, etc.) will be converted to a numeric representation using ImageNet-pretrained deep learning models. If no GPUs are found, then must be set to 'on' to enable." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_tensorflow_image", + "output": "enable tensorflow image config.toml: Enable Image Transformer for processing of image data: Whether to use pretrained deep learning models for processing of image data as part of the feature engineering pipeline. A column of URIs to images (jpg, png, etc.) will be converted to a numeric representation using ImageNet-pretrained deep learning models. If no GPUs are found, then must be set to 'on' to enable." + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_image_pretrained_models do? : tensorflow image pretrained models config.toml: Supported ImageNet pretrained architectures for Image Transformer. Non-default ones will require internet access to download pretrained models from H2O S3 buckets (To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip and unzip inside tensorflow_image_pretrained_models_dir)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_image_pretrained_models. : tensorflow image pretrained models config.toml: Supported ImageNet pretrained architectures for Image Transformer. Non-default ones will require internet access to download pretrained models from H2O S3 buckets (To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip and unzip inside tensorflow_image_pretrained_models_dir)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Supported ImageNet pretrained architectures for Image Transformer: . 
: Set the tensorflow image pretrained models config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_pretrained_models", + "output": "tensorflow image pretrained models config.toml: Supported ImageNet pretrained architectures for Image Transformer. Non-default ones will require internet access to download pretrained models from H2O S3 buckets (To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip and unzip inside tensorflow_image_pretrained_models_dir)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_pretrained_models", + "output": "tensorflow image pretrained models config.toml: Supported ImageNet pretrained architectures for Image Transformer: Supported ImageNet pretrained architectures for Image Transformer. Non-default ones will require internet access to download pretrained models from H2O S3 buckets (To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip and unzip inside tensorflow_image_pretrained_models_dir)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow image pretrained models", + "output": "tensorflow image pretrained models config.toml: Supported ImageNet pretrained architectures for Image Transformer: Supported ImageNet pretrained architectures for Image Transformer. Non-default ones will require internet access to download pretrained models from H2O S3 buckets (To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip and unzip inside tensorflow_image_pretrained_models_dir)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Supported ImageNet pretrained architectures for Image Transformer: ", + "output": "tensorflow image pretrained models config.toml: Supported ImageNet pretrained architectures for Image Transformer: Supported ImageNet pretrained architectures for Image Transformer. Non-default ones will require internet access to download pretrained models from H2O S3 buckets (To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip and unzip inside tensorflow_image_pretrained_models_dir)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_image_pretrained_models", + "output": "tensorflow image pretrained models config.toml: Supported ImageNet pretrained architectures for Image Transformer. Non-default ones will require internet access to download pretrained models from H2O S3 buckets (To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip and unzip inside tensorflow_image_pretrained_models_dir)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_image_pretrained_models", + "output": "tensorflow image pretrained models config.toml: Supported ImageNet pretrained architectures for Image Transformer: Supported ImageNet pretrained architectures for Image Transformer. Non-default ones will require internet access to download pretrained models from H2O S3 buckets (To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip and unzip inside tensorflow_image_pretrained_models_dir)." + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_image_vectorization_output_dimension do? 
: tensorflow image vectorization output dimension config.toml: Dimensionality of feature (embedding) space created by Image Transformer. If more than one is selected, multiple transformers can be active at the same time." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_image_vectorization_output_dimension. : tensorflow image vectorization output dimension config.toml: Dimensionality of feature (embedding) space created by Image Transformer. If more than one is selected, multiple transformers can be active at the same time." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Dimensionality of feature space created by Image Transformer: . : Set the tensorflow image vectorization output dimension config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_vectorization_output_dimension", + "output": "tensorflow image vectorization output dimension config.toml: Dimensionality of feature (embedding) space created by Image Transformer. If more than one is selected, multiple transformers can be active at the same time." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_vectorization_output_dimension", + "output": "tensorflow image vectorization output dimension config.toml: Dimensionality of feature space created by Image Transformer: Dimensionality of feature (embedding) space created by Image Transformer. If more than one is selected, multiple transformers can be active at the same time." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow image vectorization output dimension", + "output": "tensorflow image vectorization output dimension config.toml: Dimensionality of feature space created by Image Transformer: Dimensionality of feature (embedding) space created by Image Transformer. If more than one is selected, multiple transformers can be active at the same time." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Dimensionality of feature space created by Image Transformer: ", + "output": "tensorflow image vectorization output dimension config.toml: Dimensionality of feature space created by Image Transformer: Dimensionality of feature (embedding) space created by Image Transformer. If more than one is selected, multiple transformers can be active at the same time." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_image_vectorization_output_dimension", + "output": "tensorflow image vectorization output dimension config.toml: Dimensionality of feature (embedding) space created by Image Transformer. If more than one is selected, multiple transformers can be active at the same time." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_image_vectorization_output_dimension", + "output": "tensorflow image vectorization output dimension config.toml: Dimensionality of feature space created by Image Transformer: Dimensionality of feature (embedding) space created by Image Transformer. If more than one is selected, multiple transformers can be active at the same time." + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_image_fine_tune do? 
: tensorflow image fine tune config.toml: Enable fine-tuning of the ImageNet pretrained models used for the Image Transformer. Enabling this will slow down training, but should increase accuracy." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_image_fine_tune. : tensorflow image fine tune config.toml: Enable fine-tuning of the ImageNet pretrained models used for the Image Transformer. Enabling this will slow down training, but should increase accuracy." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable fine-tuning of pretrained models used for Image Transformer: . : Set the tensorflow image fine tune config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_fine_tune", + "output": "tensorflow image fine tune config.toml: Enable fine-tuning of the ImageNet pretrained models used for the Image Transformer. Enabling this will slow down training, but should increase accuracy." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_fine_tune", + "output": "tensorflow image fine tune config.toml: Enable fine-tuning of pretrained models used for Image Transformer: Enable fine-tuning of the ImageNet pretrained models used for the Image Transformer. Enabling this will slow down training, but should increase accuracy." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow image fine tune", + "output": "tensorflow image fine tune config.toml: Enable fine-tuning of pretrained models used for Image Transformer: Enable fine-tuning of the ImageNet pretrained models used for the Image Transformer. Enabling this will slow down training, but should increase accuracy." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable fine-tuning of pretrained models used for Image Transformer: ", + "output": "tensorflow image fine tune config.toml: Enable fine-tuning of pretrained models used for Image Transformer: Enable fine-tuning of the ImageNet pretrained models used for the Image Transformer. Enabling this will slow down training, but should increase accuracy." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_image_fine_tune", + "output": "tensorflow image fine tune config.toml: Enable fine-tuning of the ImageNet pretrained models used for the Image Transformer. Enabling this will slow down training, but should increase accuracy." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_image_fine_tune", + "output": "tensorflow image fine tune config.toml: Enable fine-tuning of pretrained models used for Image Transformer: Enable fine-tuning of the ImageNet pretrained models used for the Image Transformer. Enabling this will slow down training, but should increase accuracy." + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_image_fine_tuning_num_epochs do? : tensorflow image fine tuning num epochs config.toml: Number of epochs for fine-tuning of ImageNet pretrained models used for the Image Transformer." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_image_fine_tuning_num_epochs. : tensorflow image fine tuning num epochs config.toml: Number of epochs for fine-tuning of ImageNet pretrained models used for the Image Transformer." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of epochs for fine-tuning used for Image Transformer: . 
: Set the tensorflow image fine tuning num epochs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_fine_tuning_num_epochs", + "output": "tensorflow image fine tuning num epochs config.toml: Number of epochs for fine-tuning of ImageNet pretrained models used for the Image Transformer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_fine_tuning_num_epochs", + "output": "tensorflow image fine tuning num epochs config.toml: Number of epochs for fine-tuning used for Image Transformer: Number of epochs for fine-tuning of ImageNet pretrained models used for the Image Transformer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow image fine tuning num epochs", + "output": "tensorflow image fine tuning num epochs config.toml: Number of epochs for fine-tuning used for Image Transformer: Number of epochs for fine-tuning of ImageNet pretrained models used for the Image Transformer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of epochs for fine-tuning used for Image Transformer: ", + "output": "tensorflow image fine tuning num epochs config.toml: Number of epochs for fine-tuning used for Image Transformer: Number of epochs for fine-tuning of ImageNet pretrained models used for the Image Transformer." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_image_fine_tuning_num_epochs", + "output": "tensorflow image fine tuning num epochs config.toml: Number of epochs for fine-tuning of ImageNet pretrained models used for the Image Transformer." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_image_fine_tuning_num_epochs", + "output": "tensorflow image fine tuning num epochs config.toml: Number of epochs for fine-tuning used for Image Transformer: Number of epochs for fine-tuning of ImageNet pretrained models used for the Image Transformer." + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_image_augmentations do? : tensorflow image augmentations config.toml: The list of possible image augmentations to apply while fine-tuning the ImageNet pretrained models used for the Image Transformer. Details about individual augmentations could be found here: https://albumentations.ai/docs/." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_image_augmentations. : tensorflow image augmentations config.toml: The list of possible image augmentations to apply while fine-tuning the ImageNet pretrained models used for the Image Transformer. Details about individual augmentations could be found here: https://albumentations.ai/docs/." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: List of augmentations for fine-tuning used for Image Transformer: . : Set the tensorflow image augmentations config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_augmentations", + "output": "tensorflow image augmentations config.toml: The list of possible image augmentations to apply while fine-tuning the ImageNet pretrained models used for the Image Transformer. Details about individual augmentations could be found here: https://albumentations.ai/docs/." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_augmentations", + "output": "tensorflow image augmentations config.toml: List of augmentations for fine-tuning used for Image Transformer: The list of possible image augmentations to apply while fine-tuning the ImageNet pretrained models used for the Image Transformer. Details about individual augmentations could be found here: https://albumentations.ai/docs/." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow image augmentations", + "output": "tensorflow image augmentations config.toml: List of augmentations for fine-tuning used for Image Transformer: The list of possible image augmentations to apply while fine-tuning the ImageNet pretrained models used for the Image Transformer. Details about individual augmentations could be found here: https://albumentations.ai/docs/." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "List of augmentations for fine-tuning used for Image Transformer: ", + "output": "tensorflow image augmentations config.toml: List of augmentations for fine-tuning used for Image Transformer: The list of possible image augmentations to apply while fine-tuning the ImageNet pretrained models used for the Image Transformer. Details about individual augmentations could be found here: https://albumentations.ai/docs/." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_image_augmentations", + "output": "tensorflow image augmentations config.toml: The list of possible image augmentations to apply while fine-tuning the ImageNet pretrained models used for the Image Transformer. Details about individual augmentations could be found here: https://albumentations.ai/docs/." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_image_augmentations", + "output": "tensorflow image augmentations config.toml: List of augmentations for fine-tuning used for Image Transformer: The list of possible image augmentations to apply while fine-tuning the ImageNet pretrained models used for the Image Transformer. Details about individual augmentations could be found here: https://albumentations.ai/docs/." + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_image_batch_size do? : tensorflow image batch size config.toml: Batch size for Image Transformer. Larger architectures and larger batch sizes will use more memory." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_image_batch_size. : tensorflow image batch size config.toml: Batch size for Image Transformer. Larger architectures and larger batch sizes will use more memory." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Batch size for Image Transformer. Automatic: -1: . : Set the tensorflow image batch size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_batch_size", + "output": "tensorflow image batch size config.toml: Batch size for Image Transformer. Larger architectures and larger batch sizes will use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_batch_size", + "output": "tensorflow image batch size config.toml: Batch size for Image Transformer. Automatic: -1: Batch size for Image Transformer. Larger architectures and larger batch sizes will use more memory." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow image batch size", + "output": "tensorflow image batch size config.toml: Batch size for Image Transformer. Automatic: -1: Batch size for Image Transformer. Larger architectures and larger batch sizes will use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Batch size for Image Transformer. Automatic: -1: ", + "output": "tensorflow image batch size config.toml: Batch size for Image Transformer. Automatic: -1: Batch size for Image Transformer. Larger architectures and larger batch sizes will use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_image_batch_size", + "output": "tensorflow image batch size config.toml: Batch size for Image Transformer. Larger architectures and larger batch sizes will use more memory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_image_batch_size", + "output": "tensorflow image batch size config.toml: Batch size for Image Transformer. Automatic: -1: Batch size for Image Transformer. Larger architectures and larger batch sizes will use more memory." + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_image_pretrained_models_dir do? : tensorflow image pretrained models dir config.toml: Path to pretrained Image models. To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip, then extract it in a directory on the instance where Driverless AI is installed. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_image_pretrained_models_dir. : tensorflow image pretrained models dir config.toml: Path to pretrained Image models. 
To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip, then extract it in a directory on the instance where Driverless AI is installed. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Path to pretrained Image models. It is used to load the pretrained models if there is no Internet access.: . : Set the tensorflow image pretrained models dir config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_pretrained_models_dir", + "output": "tensorflow image pretrained models dir config.toml: Path to pretrained Image models. To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip, then extract it in a directory on the instance where Driverless AI is installed. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_pretrained_models_dir", + "output": "tensorflow image pretrained models dir config.toml: Path to pretrained Image models. It is used to load the pretrained models if there is no Internet access.: Path to pretrained Image models. To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip, then extract it in a directory on the instance where Driverless AI is installed. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow image pretrained models dir", + "output": "tensorflow image pretrained models dir config.toml: Path to pretrained Image models. It is used to load the pretrained models if there is no Internet access.: Path to pretrained Image models. 
To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip, then extract it in a directory on the instance where Driverless AI is installed. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Path to pretrained Image models. It is used to load the pretrained models if there is no Internet access.: ", + "output": "tensorflow image pretrained models dir config.toml: Path to pretrained Image models. It is used to load the pretrained models if there is no Internet access.: Path to pretrained Image models. To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip, then extract it in a directory on the instance where Driverless AI is installed. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_image_pretrained_models_dir", + "output": "tensorflow image pretrained models dir config.toml: Path to pretrained Image models. To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip, then extract it in a directory on the instance where Driverless AI is installed. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_image_pretrained_models_dir", + "output": "tensorflow image pretrained models dir config.toml: Path to pretrained Image models. It is used to load the pretrained models if there is no Internet access.: Path to pretrained Image models. To get all models, download http://s3.amazonaws.com/artifacts.h2o.ai/releases/ai/h2o/pretrained/dai_image_models_1_10.zip, then extract it in a directory on the instance where Driverless AI is installed. " + }, + { + "prompt_type": "plain", + "instruction": ": What does image_download_timeout do? 
: image download timeout config.toml: Max. number of seconds to wait for image download if images are provided by URL" + }, + { + "prompt_type": "plain", + "instruction": ": Explain image_download_timeout. : image download timeout config.toml: Max. number of seconds to wait for image download if images are provided by URL" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Image download timeout in seconds: . : Set the image download timeout config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_download_timeout", + "output": "image download timeout config.toml: Max. number of seconds to wait for image download if images are provided by URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_download_timeout", + "output": "image download timeout config.toml: Image download timeout in seconds: Max. number of seconds to wait for image download if images are provided by URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image download timeout", + "output": "image download timeout config.toml: Image download timeout in seconds: Max. number of seconds to wait for image download if images are provided by URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Image download timeout in seconds: ", + "output": "image download timeout config.toml: Image download timeout in seconds: Max. number of seconds to wait for image download if images are provided by URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting image_download_timeout", + "output": "image download timeout config.toml: Max. 
number of seconds to wait for image download if images are provided by URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting image_download_timeout", + "output": "image download timeout config.toml: Image download timeout in seconds: Max. number of seconds to wait for image download if images are provided by URL" + }, + { + "prompt_type": "plain", + "instruction": ": What does string_col_as_image_max_missing_fraction do? : string col as image max missing fraction config.toml: Maximum fraction of missing elements in a string column for it to be considered as possible image paths (URIs)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain string_col_as_image_max_missing_fraction. : string col as image max missing fraction config.toml: Maximum fraction of missing elements in a string column for it to be considered as possible image paths (URIs)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max allowed fraction of missing values for image column: . 
: Set the string col as image max missing fraction config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string_col_as_image_max_missing_fraction", + "output": "string col as image max missing fraction config.toml: Maximum fraction of missing elements in a string column for it to be considered as possible image paths (URIs)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string_col_as_image_max_missing_fraction", + "output": "string col as image max missing fraction config.toml: Max allowed fraction of missing values for image column: Maximum fraction of missing elements in a string column for it to be considered as possible image paths (URIs)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string col as image max missing fraction", + "output": "string col as image max missing fraction config.toml: Max allowed fraction of missing values for image column: Maximum fraction of missing elements in a string column for it to be considered as possible image paths (URIs)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max allowed fraction of missing values for image column: ", + "output": "string col as image max missing fraction config.toml: Max allowed fraction of missing values for image column: Maximum fraction of missing elements in a string column for it to be considered as possible image paths (URIs)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting string_col_as_image_max_missing_fraction", + "output": "string col as image max missing fraction config.toml: Maximum fraction of missing elements in a string column for it to be considered as possible image paths (URIs)" + }, + { + "prompt_type": 
"human_bot", + "instruction": "Provide a detailed explanation of the expert setting string_col_as_image_max_missing_fraction", + "output": "string col as image max missing fraction config.toml: Max allowed fraction of missing values for image column: Maximum fraction of missing elements in a string column for it to be considered as possible image paths (URIs)" + }, + { + "prompt_type": "plain", + "instruction": ": What does string_col_as_image_min_valid_types_fraction do? : string col as image min valid types fraction config.toml: Fraction of (unique) image URIs that need to have valid endings (as defined by string_col_as_image_valid_types) for a string column to be considered as image data" + }, + { + "prompt_type": "plain", + "instruction": ": Explain string_col_as_image_min_valid_types_fraction. : string col as image min valid types fraction config.toml: Fraction of (unique) image URIs that need to have valid endings (as defined by string_col_as_image_valid_types) for a string column to be considered as image data" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Min. fraction of images that need to be of valid types for image column to be used: . : Set the string col as image min valid types fraction config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string_col_as_image_min_valid_types_fraction", + "output": "string col as image min valid types fraction config.toml: Fraction of (unique) image URIs that need to have valid endings (as defined by string_col_as_image_valid_types) for a string column to be considered as image data" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string_col_as_image_min_valid_types_fraction", + "output": "string col as image min valid types fraction config.toml: Min. 
fraction of images that need to be of valid types for image column to be used: Fraction of (unique) image URIs that need to have valid endings (as defined by string_col_as_image_valid_types) for a string column to be considered as image data" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "string col as image min valid types fraction", + "output": "string col as image min valid types fraction config.toml: Min. fraction of images that need to be of valid types for image column to be used: Fraction of (unique) image URIs that need to have valid endings (as defined by string_col_as_image_valid_types) for a string column to be considered as image data" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Min. fraction of images that need to be of valid types for image column to be used: ", + "output": "string col as image min valid types fraction config.toml: Min. fraction of images that need to be of valid types for image column to be used: Fraction of (unique) image URIs that need to have valid endings (as defined by string_col_as_image_valid_types) for a string column to be considered as image data" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting string_col_as_image_min_valid_types_fraction", + "output": "string col as image min valid types fraction config.toml: Fraction of (unique) image URIs that need to have valid endings (as defined by string_col_as_image_valid_types) for a string column to be considered as image data" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting string_col_as_image_min_valid_types_fraction", + "output": "string col as image min valid types fraction config.toml: Min. 
fraction of images that need to be of valid types for image column to be used: Fraction of (unique) image URIs that need to have valid endings (as defined by string_col_as_image_valid_types) for a string column to be considered as image data" + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_image_use_gpu do? : tensorflow image use gpu config.toml: Whether to use GPU(s), if available, to transform images into embeddings with Image Transformer. Can lead to significant speedups." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_image_use_gpu. : tensorflow image use gpu config.toml: Whether to use GPU(s), if available, to transform images into embeddings with Image Transformer. Can lead to significant speedups." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable GPU(s) for faster transformations of Image Transformer.: . : Set the tensorflow image use gpu config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_use_gpu", + "output": "tensorflow image use gpu config.toml: Whether to use GPU(s), if available, to transform images into embeddings with Image Transformer. Can lead to significant speedups." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_image_use_gpu", + "output": "tensorflow image use gpu config.toml: Enable GPU(s) for faster transformations of Image Transformer.: Whether to use GPU(s), if available, to transform images into embeddings with Image Transformer. Can lead to significant speedups." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow image use gpu", + "output": "tensorflow image use gpu config.toml: Enable GPU(s) for faster transformations of Image Transformer.: Whether to use GPU(s), if available, to transform images into embeddings with Image Transformer. Can lead to significant speedups." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable GPU(s) for faster transformations of Image Transformer.: ", + "output": "tensorflow image use gpu config.toml: Enable GPU(s) for faster transformations of Image Transformer.: Whether to use GPU(s), if available, to transform images into embeddings with Image Transformer. Can lead to significant speedups." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_image_use_gpu", + "output": "tensorflow image use gpu config.toml: Whether to use GPU(s), if available, to transform images into embeddings with Image Transformer. Can lead to significant speedups." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_image_use_gpu", + "output": "tensorflow image use gpu config.toml: Enable GPU(s) for faster transformations of Image Transformer.: Whether to use GPU(s), if available, to transform images into embeddings with Image Transformer. Can lead to significant speedups." + }, + { + "prompt_type": "plain", + "instruction": ": What does params_image_auto_search_space do? : params image auto search space config.toml: Nominally, the time dial controls the search space, with higher time trying more options, but any keys present in this dictionary will override the automatic choices.e.g. 
``params_image_auto_search_space=\"{'augmentation': ['safe'], 'crop_strategy': ['Resize'], 'optimizer': ['AdamW'], 'dropout': [0.1], 'epochs_per_stage': [5], 'warmup_epochs': [0], 'mixup': [0.0], 'cutmix': [0.0], 'global_pool': ['avg'], 'learning_rate': [3e-4]}\"``Options, e.g. used for time>=8# Overfit Protection Options:'augmentation': ``[\"safe\", \"semi_safe\", \"hard\"]``'crop_strategy': ``[\"Resize\", \"RandomResizedCropSoft\", \"RandomResizedCropHard\"]``'dropout': ``[0.1, 0.3, 0.5]``# Global Pool Options: avgmax -- sum of AVG and MAX poolings catavgmax -- concatenation of AVG and MAX poolings https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/adaptive_avgmax_pool.py ``'global_pool': ['avg', 'avgmax', 'catavgmax']``# Regression: No MixUp and CutMix: ``'mixup': [0.0]`` ``'cutmix': [0.0]``# Classification: Beta distribution coeff to generate weights for MixUp: ``'mixup': [0.0, 0.4, 1.0, 3.0]`` ``'cutmix': [0.0, 0.4, 1.0, 3.0]``# Optimization Options:``'epochs_per_stage': [5, 10, 15]`` # from 40 to 135 epochs``'warmup_epochs': [0, 0.5, 1]````'optimizer': [\"AdamW\", \"SGD\"]````'learning_rate': [1e-3, 3e-4, 1e-4]``" + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_image_auto_search_space. : params image auto search space config.toml: Nominally, the time dial controls the search space, with higher time trying more options, but any keys present in this dictionary will override the automatic choices.e.g. ``params_image_auto_search_space=\"{'augmentation': ['safe'], 'crop_strategy': ['Resize'], 'optimizer': ['AdamW'], 'dropout': [0.1], 'epochs_per_stage': [5], 'warmup_epochs': [0], 'mixup': [0.0], 'cutmix': [0.0], 'global_pool': ['avg'], 'learning_rate': [3e-4]}\"``Options, e.g. 
used for time>=8# Overfit Protection Options:'augmentation': ``[\"safe\", \"semi_safe\", \"hard\"]``'crop_strategy': ``[\"Resize\", \"RandomResizedCropSoft\", \"RandomResizedCropHard\"]``'dropout': ``[0.1, 0.3, 0.5]``# Global Pool Options: avgmax -- sum of AVG and MAX poolings catavgmax -- concatenation of AVG and MAX poolings https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/adaptive_avgmax_pool.py ``'global_pool': ['avg', 'avgmax', 'catavgmax']``# Regression: No MixUp and CutMix: ``'mixup': [0.0]`` ``'cutmix': [0.0]``# Classification: Beta distribution coeff to generate weights for MixUp: ``'mixup': [0.0, 0.4, 1.0, 3.0]`` ``'cutmix': [0.0, 0.4, 1.0, 3.0]``# Optimization Options:``'epochs_per_stage': [5, 10, 15]`` # from 40 to 135 epochs``'warmup_epochs': [0, 0.5, 1]````'optimizer': [\"AdamW\", \"SGD\"]````'learning_rate': [1e-3, 3e-4, 1e-4]``" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Search parameter overrides for image auto: . : Set the params image auto search space config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_image_auto_search_space", + "output": "params image auto search space config.toml: Nominally, the time dial controls the search space, with higher time trying more options, but any keys present in this dictionary will override the automatic choices.e.g. ``params_image_auto_search_space=\"{'augmentation': ['safe'], 'crop_strategy': ['Resize'], 'optimizer': ['AdamW'], 'dropout': [0.1], 'epochs_per_stage': [5], 'warmup_epochs': [0], 'mixup': [0.0], 'cutmix': [0.0], 'global_pool': ['avg'], 'learning_rate': [3e-4]}\"``Options, e.g. 
used for time>=8# Overfit Protection Options:'augmentation': ``[\"safe\", \"semi_safe\", \"hard\"]``'crop_strategy': ``[\"Resize\", \"RandomResizedCropSoft\", \"RandomResizedCropHard\"]``'dropout': ``[0.1, 0.3, 0.5]``# Global Pool Options: avgmax -- sum of AVG and MAX poolings catavgmax -- concatenation of AVG and MAX poolings https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/adaptive_avgmax_pool.py ``'global_pool': ['avg', 'avgmax', 'catavgmax']``# Regression: No MixUp and CutMix: ``'mixup': [0.0]`` ``'cutmix': [0.0]``# Classification: Beta distribution coeff to generate weights for MixUp: ``'mixup': [0.0, 0.4, 1.0, 3.0]`` ``'cutmix': [0.0, 0.4, 1.0, 3.0]``# Optimization Options:``'epochs_per_stage': [5, 10, 15]`` # from 40 to 135 epochs``'warmup_epochs': [0, 0.5, 1]````'optimizer': [\"AdamW\", \"SGD\"]````'learning_rate': [1e-3, 3e-4, 1e-4]``" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_image_auto_search_space", + "output": "params image auto search space config.toml: Search parameter overrides for image auto: Nominally, the time dial controls the search space, with higher time trying more options, but any keys present in this dictionary will override the automatic choices.e.g. ``params_image_auto_search_space=\"{'augmentation': ['safe'], 'crop_strategy': ['Resize'], 'optimizer': ['AdamW'], 'dropout': [0.1], 'epochs_per_stage': [5], 'warmup_epochs': [0], 'mixup': [0.0], 'cutmix': [0.0], 'global_pool': ['avg'], 'learning_rate': [3e-4]}\"``Options, e.g. 
used for time>=8# Overfit Protection Options:'augmentation': ``[\"safe\", \"semi_safe\", \"hard\"]``'crop_strategy': ``[\"Resize\", \"RandomResizedCropSoft\", \"RandomResizedCropHard\"]``'dropout': ``[0.1, 0.3, 0.5]``# Global Pool Options: avgmax -- sum of AVG and MAX poolings catavgmax -- concatenation of AVG and MAX poolings https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/adaptive_avgmax_pool.py ``'global_pool': ['avg', 'avgmax', 'catavgmax']``# Regression: No MixUp and CutMix: ``'mixup': [0.0]`` ``'cutmix': [0.0]``# Classification: Beta distribution coeff to generate weights for MixUp: ``'mixup': [0.0, 0.4, 1.0, 3.0]`` ``'cutmix': [0.0, 0.4, 1.0, 3.0]``# Optimization Options:``'epochs_per_stage': [5, 10, 15]`` # from 40 to 135 epochs``'warmup_epochs': [0, 0.5, 1]````'optimizer': [\"AdamW\", \"SGD\"]````'learning_rate': [1e-3, 3e-4, 1e-4]``" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params image auto search space", + "output": "params image auto search space config.toml: Search parameter overrides for image auto: Nominally, the time dial controls the search space, with higher time trying more options, but any keys present in this dictionary will override the automatic choices.e.g. ``params_image_auto_search_space=\"{'augmentation': ['safe'], 'crop_strategy': ['Resize'], 'optimizer': ['AdamW'], 'dropout': [0.1], 'epochs_per_stage': [5], 'warmup_epochs': [0], 'mixup': [0.0], 'cutmix': [0.0], 'global_pool': ['avg'], 'learning_rate': [3e-4]}\"``Options, e.g. 
used for time>=8# Overfit Protection Options:'augmentation': ``[\"safe\", \"semi_safe\", \"hard\"]``'crop_strategy': ``[\"Resize\", \"RandomResizedCropSoft\", \"RandomResizedCropHard\"]``'dropout': ``[0.1, 0.3, 0.5]``# Global Pool Options: avgmax -- sum of AVG and MAX poolings catavgmax -- concatenation of AVG and MAX poolings https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/adaptive_avgmax_pool.py ``'global_pool': ['avg', 'avgmax', 'catavgmax']``# Regression: No MixUp and CutMix: ``'mixup': [0.0]`` ``'cutmix': [0.0]``# Classification: Beta distribution coeff to generate weights for MixUp: ``'mixup': [0.0, 0.4, 1.0, 3.0]`` ``'cutmix': [0.0, 0.4, 1.0, 3.0]``# Optimization Options:``'epochs_per_stage': [5, 10, 15]`` # from 40 to 135 epochs``'warmup_epochs': [0, 0.5, 1]````'optimizer': [\"AdamW\", \"SGD\"]````'learning_rate': [1e-3, 3e-4, 1e-4]``" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Search parameter overrides for image auto: ", + "output": "params image auto search space config.toml: Search parameter overrides for image auto: Nominally, the time dial controls the search space, with higher time trying more options, but any keys present in this dictionary will override the automatic choices.e.g. ``params_image_auto_search_space=\"{'augmentation': ['safe'], 'crop_strategy': ['Resize'], 'optimizer': ['AdamW'], 'dropout': [0.1], 'epochs_per_stage': [5], 'warmup_epochs': [0], 'mixup': [0.0], 'cutmix': [0.0], 'global_pool': ['avg'], 'learning_rate': [3e-4]}\"``Options, e.g. 
used for time>=8# Overfit Protection Options:'augmentation': ``[\"safe\", \"semi_safe\", \"hard\"]``'crop_strategy': ``[\"Resize\", \"RandomResizedCropSoft\", \"RandomResizedCropHard\"]``'dropout': ``[0.1, 0.3, 0.5]``# Global Pool Options: avgmax -- sum of AVG and MAX poolings catavgmax -- concatenation of AVG and MAX poolings https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/adaptive_avgmax_pool.py ``'global_pool': ['avg', 'avgmax', 'catavgmax']``# Regression: No MixUp and CutMix: ``'mixup': [0.0]`` ``'cutmix': [0.0]``# Classification: Beta distribution coeff to generate weights for MixUp: ``'mixup': [0.0, 0.4, 1.0, 3.0]`` ``'cutmix': [0.0, 0.4, 1.0, 3.0]``# Optimization Options:``'epochs_per_stage': [5, 10, 15]`` # from 40 to 135 epochs``'warmup_epochs': [0, 0.5, 1]````'optimizer': [\"AdamW\", \"SGD\"]````'learning_rate': [1e-3, 3e-4, 1e-4]``" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_image_auto_search_space", + "output": "params image auto search space config.toml: Nominally, the time dial controls the search space, with higher time trying more options, but any keys present in this dictionary will override the automatic choices.e.g. ``params_image_auto_search_space=\"{'augmentation': ['safe'], 'crop_strategy': ['Resize'], 'optimizer': ['AdamW'], 'dropout': [0.1], 'epochs_per_stage': [5], 'warmup_epochs': [0], 'mixup': [0.0], 'cutmix': [0.0], 'global_pool': ['avg'], 'learning_rate': [3e-4]}\"``Options, e.g. 
used for time>=8# Overfit Protection Options:'augmentation': ``[\"safe\", \"semi_safe\", \"hard\"]``'crop_strategy': ``[\"Resize\", \"RandomResizedCropSoft\", \"RandomResizedCropHard\"]``'dropout': ``[0.1, 0.3, 0.5]``# Global Pool Options: avgmax -- sum of AVG and MAX poolings catavgmax -- concatenation of AVG and MAX poolings https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/adaptive_avgmax_pool.py ``'global_pool': ['avg', 'avgmax', 'catavgmax']``# Regression: No MixUp and CutMix: ``'mixup': [0.0]`` ``'cutmix': [0.0]``# Classification: Beta distribution coeff to generate weights for MixUp: ``'mixup': [0.0, 0.4, 1.0, 3.0]`` ``'cutmix': [0.0, 0.4, 1.0, 3.0]``# Optimization Options:``'epochs_per_stage': [5, 10, 15]`` # from 40 to 135 epochs``'warmup_epochs': [0, 0.5, 1]````'optimizer': [\"AdamW\", \"SGD\"]````'learning_rate': [1e-3, 3e-4, 1e-4]``" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_image_auto_search_space", + "output": "params image auto search space config.toml: Search parameter overrides for image auto: Nominally, the time dial controls the search space, with higher time trying more options, but any keys present in this dictionary will override the automatic choices.e.g. ``params_image_auto_search_space=\"{'augmentation': ['safe'], 'crop_strategy': ['Resize'], 'optimizer': ['AdamW'], 'dropout': [0.1], 'epochs_per_stage': [5], 'warmup_epochs': [0], 'mixup': [0.0], 'cutmix': [0.0], 'global_pool': ['avg'], 'learning_rate': [3e-4]}\"``Options, e.g. 
used for time>=8# Overfit Protection Options:'augmentation': ``[\"safe\", \"semi_safe\", \"hard\"]``'crop_strategy': ``[\"Resize\", \"RandomResizedCropSoft\", \"RandomResizedCropHard\"]``'dropout': ``[0.1, 0.3, 0.5]``# Global Pool Options: avgmax -- sum of AVG and MAX poolings catavgmax -- concatenation of AVG and MAX poolings https://github.com/rwightman/pytorch-image-models/blob/master/timm/models/layers/adaptive_avgmax_pool.py ``'global_pool': ['avg', 'avgmax', 'catavgmax']``# Regression: No MixUp and CutMix: ``'mixup': [0.0]`` ``'cutmix': [0.0]``# Classification: Beta distribution coeff to generate weights for MixUp: ``'mixup': [0.0, 0.4, 1.0, 3.0]`` ``'cutmix': [0.0, 0.4, 1.0, 3.0]``# Optimization Options:``'epochs_per_stage': [5, 10, 15]`` # from 40 to 135 epochs``'warmup_epochs': [0, 0.5, 1]````'optimizer': [\"AdamW\", \"SGD\"]````'learning_rate': [1e-3, 3e-4, 1e-4]``" + }, + { + "prompt_type": "plain", + "instruction": ": What does image_auto_arch do? : image auto arch config.toml: Nominally, the accuracy dial controls the architectures considered if this is left empty, but one can choose specific ones. The options in the list are ordered by complexity." + }, + { + "prompt_type": "plain", + "instruction": ": Explain image_auto_arch. : image auto arch config.toml: Nominally, the accuracy dial controls the architectures considered if this is left empty, but one can choose specific ones. The options in the list are ordered by complexity." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Architectures for image auto: . : Set the image auto arch config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_auto_arch", + "output": "image auto arch config.toml: Nominally, the accuracy dial controls the architectures considered if this is left empty, but one can choose specific ones. The options in the list are ordered by complexity." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_auto_arch", + "output": "image auto arch config.toml: Architectures for image auto: Nominally, the accuracy dial controls the architectures considered if this is left empty, but one can choose specific ones. The options in the list are ordered by complexity." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image auto arch", + "output": "image auto arch config.toml: Architectures for image auto: Nominally, the accuracy dial controls the architectures considered if this is left empty, but one can choose specific ones. The options in the list are ordered by complexity." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Architectures for image auto: ", + "output": "image auto arch config.toml: Architectures for image auto: Nominally, the accuracy dial controls the architectures considered if this is left empty, but one can choose specific ones. The options in the list are ordered by complexity." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting image_auto_arch", + "output": "image auto arch config.toml: Nominally, the accuracy dial controls the architectures considered if this is left empty, but one can choose specific ones. The options in the list are ordered by complexity." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting image_auto_arch", + "output": "image auto arch config.toml: Architectures for image auto: Nominally, the accuracy dial controls the architectures considered if this is left empty, but one can choose specific ones. The options in the list are ordered by complexity." + }, + { + "prompt_type": "plain", + "instruction": ": What does image_auto_min_shape do? 
: image auto min shape config.toml: Any images smaller are upscaled to the minimum. Default is 64, but can be as small as 32 given the pooling layers used." + }, + { + "prompt_type": "plain", + "instruction": ": Explain image_auto_min_shape. : image auto min shape config.toml: Any images smaller are upscaled to the minimum. Default is 64, but can be as small as 32 given the pooling layers used." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Minimum image size: . : Set the image auto min shape config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_auto_min_shape", + "output": "image auto min shape config.toml: Any images smaller are upscaled to the minimum. Default is 64, but can be as small as 32 given the pooling layers used." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_auto_min_shape", + "output": "image auto min shape config.toml: Minimum image size: Any images smaller are upscaled to the minimum. Default is 64, but can be as small as 32 given the pooling layers used." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image auto min shape", + "output": "image auto min shape config.toml: Minimum image size: Any images smaller are upscaled to the minimum. Default is 64, but can be as small as 32 given the pooling layers used." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Minimum image size: ", + "output": "image auto min shape config.toml: Minimum image size: Any images smaller are upscaled to the minimum. Default is 64, but can be as small as 32 given the pooling layers used." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting image_auto_min_shape", + "output": "image auto min shape config.toml: Any images smaller are upscaled to the minimum. Default is 64, but can be as small as 32 given the pooling layers used." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting image_auto_min_shape", + "output": "image auto min shape config.toml: Minimum image size: Any images smaller are upscaled to the minimum. Default is 64, but can be as small as 32 given the pooling layers used." + }, + { + "prompt_type": "plain", + "instruction": ": What does image_auto_num_final_models do? : image auto num final models config.toml: 0 means automatic based upon time dial of min(1, time//2)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain image_auto_num_final_models. : image auto num final models config.toml: 0 means automatic based upon time dial of min(1, time//2)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of models in final ensemble: . : Set the image auto num final models config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_auto_num_final_models", + "output": "image auto num final models config.toml: 0 means automatic based upon time dial of min(1, time//2)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_auto_num_final_models", + "output": "image auto num final models config.toml: Number of models in final ensemble: 0 means automatic based upon time dial of min(1, time//2)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image auto num final models", + "output": "image auto num final models config.toml: Number of models in final ensemble: 0 means automatic based upon time dial of min(1, time//2)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of models in final ensemble: ", + "output": "image auto num final models config.toml: Number of models in final ensemble: 0 means automatic based upon time dial of min(1, time//2)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting image_auto_num_final_models", + "output": "image auto num final models config.toml: 0 means automatic based upon time dial of min(1, time//2)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting image_auto_num_final_models", + "output": "image auto num final models config.toml: Number of models in final ensemble: 0 means automatic based upon time dial of min(1, time//2)." + }, + { + "prompt_type": "plain", + "instruction": ": What does image_auto_num_models do? : image auto num models config.toml: 0 means automatic based upon time dial of max(4 * (time - 1), 2)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain image_auto_num_models. : image auto num models config.toml: 0 means automatic based upon time dial of max(4 * (time - 1), 2)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of models in search space: . : Set the image auto num models config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_auto_num_models", + "output": "image auto num models config.toml: 0 means automatic based upon time dial of max(4 * (time - 1), 2)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_auto_num_models", + "output": "image auto num models config.toml: Number of models in search space: 0 means automatic based upon time dial of max(4 * (time - 1), 2)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image auto num models", + "output": "image auto num models config.toml: Number of models in search space: 0 means automatic based upon time dial of max(4 * (time - 1), 2)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of models in search space: ", + "output": "image auto num models config.toml: Number of models in search space: 0 means automatic based upon time dial of max(4 * (time - 1), 2)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting image_auto_num_models", + "output": "image auto num models config.toml: 0 means automatic based upon time dial of max(4 * (time - 1), 2)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting image_auto_num_models", + "output": "image auto num models config.toml: Number of models in search space: 0 means automatic based upon time dial of max(4 * (time - 1), 2)." + }, + { + "prompt_type": "plain", + "instruction": ": What does image_auto_num_stages do? : image auto num stages config.toml: 0 means automatic based upon time dial of time + 1 if time < 6 else time - 1." + }, + { + "prompt_type": "plain", + "instruction": ": Explain image_auto_num_stages. : image auto num stages config.toml: 0 means automatic based upon time dial of time + 1 if time < 6 else time - 1." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of stages for hyperparameter search: . 
: Set the image auto num stages config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_auto_num_stages", + "output": "image auto num stages config.toml: 0 means automatic based upon time dial of time + 1 if time < 6 else time - 1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_auto_num_stages", + "output": "image auto num stages config.toml: Number of stages for hyperparameter search: 0 means automatic based upon time dial of time + 1 if time < 6 else time - 1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image auto num stages", + "output": "image auto num stages config.toml: Number of stages for hyperparameter search: 0 means automatic based upon time dial of time + 1 if time < 6 else time - 1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of stages for hyperparameter search: ", + "output": "image auto num stages config.toml: Number of stages for hyperparameter search: 0 means automatic based upon time dial of time + 1 if time < 6 else time - 1." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting image_auto_num_stages", + "output": "image auto num stages config.toml: 0 means automatic based upon time dial of time + 1 if time < 6 else time - 1." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting image_auto_num_stages", + "output": "image auto num stages config.toml: Number of stages for hyperparameter search: 0 means automatic based upon time dial of time + 1 if time < 6 else time - 1." + }, + { + "prompt_type": "plain", + "instruction": ": What does image_auto_iterations do? 
: image auto iterations config.toml: 0 means automatic based upon time dial or number of models and stages set by image_auto_num_models and image_auto_num_stages." + }, + { + "prompt_type": "plain", + "instruction": ": Explain image_auto_iterations. : image auto iterations config.toml: 0 means automatic based upon time dial or number of models and stages set by image_auto_num_models and image_auto_num_stages." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of iterations for successive halving: . : Set the image auto iterations config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_auto_iterations", + "output": "image auto iterations config.toml: 0 means automatic based upon time dial or number of models and stages set by image_auto_num_models and image_auto_num_stages." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_auto_iterations", + "output": "image auto iterations config.toml: Number of iterations for successive halving: 0 means automatic based upon time dial or number of models and stages set by image_auto_num_models and image_auto_num_stages." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image auto iterations", + "output": "image auto iterations config.toml: Number of iterations for successive halving: 0 means automatic based upon time dial or number of models and stages set by image_auto_num_models and image_auto_num_stages." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of iterations for successive halving: ", + "output": "image auto iterations config.toml: Number of iterations for successive halving: 0 means automatic based upon time dial or number of models and stages set by image_auto_num_models and image_auto_num_stages." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting image_auto_iterations", + "output": "image auto iterations config.toml: 0 means automatic based upon time dial or number of models and stages set by image_auto_num_models and image_auto_num_stages." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting image_auto_iterations", + "output": "image auto iterations config.toml: Number of iterations for successive halving: 0 means automatic based upon time dial or number of models and stages set by image_auto_num_models and image_auto_num_stages." + }, + { + "prompt_type": "plain", + "instruction": ": What does image_auto_shape_factor do? : image auto shape factor config.toml: 0.0 means automatic based upon the current stage, where stage 0 uses half, stage 1 uses 3/4, and stage 2 uses full image. One can pass 1.0 to override and always use full image. 0.5 would mean use half." + }, + { + "prompt_type": "plain", + "instruction": ": Explain image_auto_shape_factor. : image auto shape factor config.toml: 0.0 means automatic based upon the current stage, where stage 0 uses half, stage 1 uses 3/4, and stage 2 uses full image. One can pass 1.0 to override and always use full image. 0.5 would mean use half." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Image downscale ratio to use for training: . 
: Set the image auto shape factor config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_auto_shape_factor", + "output": "image auto shape factor config.toml: 0.0 means automatic based upon the current stage, where stage 0 uses half, stage 1 uses 3/4, and stage 2 uses full image. One can pass 1.0 to override and always use full image. 0.5 would mean use half." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image_auto_shape_factor", + "output": "image auto shape factor config.toml: Image downscale ratio to use for training: 0.0 means automatic based upon the current stage, where stage 0 uses half, stage 1 uses 3/4, and stage 2 uses full image. One can pass 1.0 to override and always use full image. 0.5 would mean use half." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "image auto shape factor", + "output": "image auto shape factor config.toml: Image downscale ratio to use for training: 0.0 means automatic based upon the current stage, where stage 0 uses half, stage 1 uses 3/4, and stage 2 uses full image. One can pass 1.0 to override and always use full image. 0.5 would mean use half." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Image downscale ratio to use for training: ", + "output": "image auto shape factor config.toml: Image downscale ratio to use for training: 0.0 means automatic based upon the current stage, where stage 0 uses half, stage 1 uses 3/4, and stage 2 uses full image. One can pass 1.0 to override and always use full image. 0.5 would mean use half." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting image_auto_shape_factor", + "output": "image auto shape factor config.toml: 0.0 means automatic based upon the current stage, where stage 0 uses half, stage 1 uses 3/4, and stage 2 uses full image. One can pass 1.0 to override and always use full image. 0.5 would mean use half." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting image_auto_shape_factor", + "output": "image auto shape factor config.toml: Image downscale ratio to use for training: 0.0 means automatic based upon the current stage, where stage 0 uses half, stage 1 uses 3/4, and stage 2 uses full image. One can pass 1.0 to override and always use full image. 0.5 would mean use half." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_image_auto_ddp_cores do? : max image auto ddp cores config.toml: Control maximum number of cores to use for image auto model parallel data management. 0 will disable mp: https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_image_auto_ddp_cores. : max image auto ddp cores config.toml: Control maximum number of cores to use for image auto model parallel data management. 0 will disable mp: https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum number of cores to use for image auto model parallel data management: . : Set the max image auto ddp cores config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_image_auto_ddp_cores", + "output": "max image auto ddp cores config.toml: Control maximum number of cores to use for image auto model parallel data management. 
0 will disable mp: https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_image_auto_ddp_cores", + "output": "max image auto ddp cores config.toml: Maximum number of cores to use for image auto model parallel data management: Control maximum number of cores to use for image auto model parallel data management. 0 will disable mp: https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max image auto ddp cores", + "output": "max image auto ddp cores config.toml: Maximum number of cores to use for image auto model parallel data management: Control maximum number of cores to use for image auto model parallel data management. 0 will disable mp: https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of cores to use for image auto model parallel data management: ", + "output": "max image auto ddp cores config.toml: Maximum number of cores to use for image auto model parallel data management: Control maximum number of cores to use for image auto model parallel data management. 0 will disable mp: https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_image_auto_ddp_cores", + "output": "max image auto ddp cores config.toml: Control maximum number of cores to use for image auto model parallel data management. 
0 will disable mp: https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_image_auto_ddp_cores", + "output": "max image auto ddp cores config.toml: Maximum number of cores to use for image auto model parallel data management: Control maximum number of cores to use for image auto model parallel data management. 0 will disable mp: https://pytorch-lightning.readthedocs.io/en/latest/guides/speed.html" + }, + { + "prompt_type": "plain", + "instruction": ": What does text_dl_token_pad_percentile do? : text dl token pad percentile config.toml: Percentile value cutoff of input text token lengths for nlp deep learning models" + }, + { + "prompt_type": "plain", + "instruction": ": Explain text_dl_token_pad_percentile. : text dl token pad percentile config.toml: Percentile value cutoff of input text token lengths for nlp deep learning models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text_dl_token_pad_percentile", + "output": "text dl token pad percentile config.toml: Percentile value cutoff of input text token lengths for nlp deep learning models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text_dl_token_pad_percentile", + "output": "text dl token pad percentile config.toml: Percentile value cutoff of input text token lengths for nlp deep learning models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text dl token pad percentile", + "output": "text dl token pad percentile config.toml: Percentile value cutoff of input text token lengths for nlp deep learning models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": 
"text dl token pad percentile config.toml: Percentile value cutoff of input text token lengths for nlp deep learning models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting text_dl_token_pad_percentile", + "output": "text dl token pad percentile config.toml: Percentile value cutoff of input text token lengths for nlp deep learning models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting text_dl_token_pad_percentile", + "output": "text dl token pad percentile config.toml: Percentile value cutoff of input text token lengths for nlp deep learning models" + }, + { + "prompt_type": "plain", + "instruction": ": What does text_dl_token_pad_max do? : text dl token pad max config.toml: Maximum token length of input text to be used in nlp deep learning models" + }, + { + "prompt_type": "plain", + "instruction": ": Explain text_dl_token_pad_max. : text dl token pad max config.toml: Maximum token length of input text to be used in nlp deep learning models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text_dl_token_pad_max", + "output": "text dl token pad max config.toml: Maximum token length of input text to be used in nlp deep learning models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text_dl_token_pad_max", + "output": "text dl token pad max config.toml: Maximum token length of input text to be used in nlp deep learning models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text dl token pad max", + "output": "text dl token pad max config.toml: Maximum token length of input text to be used in nlp deep learning models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for 
Driverless AI", + "input": "", + "output": "text dl token pad max config.toml: Maximum token length of input text to be used in nlp deep learning models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting text_dl_token_pad_max", + "output": "text dl token pad max config.toml: Maximum token length of input text to be used in nlp deep learning models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting text_dl_token_pad_max", + "output": "text dl token pad max config.toml: Maximum token length of input text to be used in nlp deep learning models" + }, + { + "prompt_type": "plain", + "instruction": ": What does monotonicity_constraints_interpretability_switch do? : monotonicity constraints interpretability switch config.toml: Interpretability setting equal and above which will use automatic monotonicity constraints in XGBoostGBM/LightGBM/DecisionTree models. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain monotonicity_constraints_interpretability_switch. : monotonicity constraints interpretability switch config.toml: Interpretability setting equal and above which will use automatic monotonicity constraints in XGBoostGBM/LightGBM/DecisionTree models. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Threshold for interpretability above which to enable automatic monotonicity constraints for tree models: . : Set the monotonicity constraints interpretability switch config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity_constraints_interpretability_switch", + "output": "monotonicity constraints interpretability switch config.toml: Interpretability setting equal and above which will use automatic monotonicity constraints in XGBoostGBM/LightGBM/DecisionTree models. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity_constraints_interpretability_switch", + "output": "monotonicity constraints interpretability switch config.toml: Threshold for interpretability above which to enable automatic monotonicity constraints for tree models: Interpretability setting equal and above which will use automatic monotonicity constraints in XGBoostGBM/LightGBM/DecisionTree models. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity constraints interpretability switch", + "output": "monotonicity constraints interpretability switch config.toml: Threshold for interpretability above which to enable automatic monotonicity constraints for tree models: Interpretability setting equal and above which will use automatic monotonicity constraints in XGBoostGBM/LightGBM/DecisionTree models. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Threshold for interpretability above which to enable automatic monotonicity constraints for tree models: ", + "output": "monotonicity constraints interpretability switch config.toml: Threshold for interpretability above which to enable automatic monotonicity constraints for tree models: Interpretability setting equal and above which will use automatic monotonicity constraints in XGBoostGBM/LightGBM/DecisionTree models. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting monotonicity_constraints_interpretability_switch", + "output": "monotonicity constraints interpretability switch config.toml: Interpretability setting equal and above which will use automatic monotonicity constraints in XGBoostGBM/LightGBM/DecisionTree models. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting monotonicity_constraints_interpretability_switch", + "output": "monotonicity constraints interpretability switch config.toml: Threshold for interpretability above which to enable automatic monotonicity constraints for tree models: Interpretability setting equal and above which will use automatic monotonicity constraints in XGBoostGBM/LightGBM/DecisionTree models. " + }, + { + "prompt_type": "plain", + "instruction": ": What does monotonicity_constraints_log_level do? : monotonicity constraints log level config.toml: For models that support monotonicity constraints, and if enabled, show automatically determined monotonicity constraints for each feature going into the model based on its correlation with the target. 'low' shows only monotonicity constraint direction. 'medium' shows correlation of positively and negatively constraint features. 'high' shows all correlation values." + }, + { + "prompt_type": "plain", + "instruction": ": Explain monotonicity_constraints_log_level. : monotonicity constraints log level config.toml: For models that support monotonicity constraints, and if enabled, show automatically determined monotonicity constraints for each feature going into the model based on its correlation with the target. 'low' shows only monotonicity constraint direction. 'medium' shows correlation of positively and negatively constraint features. 'high' shows all correlation values." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Control amount of logging when calculating automatic monotonicity constraints (if enabled): . 
: Set the monotonicity constraints log level config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity_constraints_log_level", + "output": "monotonicity constraints log level config.toml: For models that support monotonicity constraints, and if enabled, show automatically determined monotonicity constraints for each feature going into the model based on its correlation with the target. 'low' shows only monotonicity constraint direction. 'medium' shows correlation of positively and negatively constraint features. 'high' shows all correlation values." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity_constraints_log_level", + "output": "monotonicity constraints log level config.toml: Control amount of logging when calculating automatic monotonicity constraints (if enabled): For models that support monotonicity constraints, and if enabled, show automatically determined monotonicity constraints for each feature going into the model based on its correlation with the target. 'low' shows only monotonicity constraint direction. 'medium' shows correlation of positively and negatively constraint features. 'high' shows all correlation values." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity constraints log level", + "output": "monotonicity constraints log level config.toml: Control amount of logging when calculating automatic monotonicity constraints (if enabled): For models that support monotonicity constraints, and if enabled, show automatically determined monotonicity constraints for each feature going into the model based on its correlation with the target. 'low' shows only monotonicity constraint direction. 'medium' shows correlation of positively and negatively constraint features. 
'high' shows all correlation values." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Control amount of logging when calculating automatic monotonicity constraints (if enabled): ", + "output": "monotonicity constraints log level config.toml: Control amount of logging when calculating automatic monotonicity constraints (if enabled): For models that support monotonicity constraints, and if enabled, show automatically determined monotonicity constraints for each feature going into the model based on its correlation with the target. 'low' shows only monotonicity constraint direction. 'medium' shows correlation of positively and negatively constraint features. 'high' shows all correlation values." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting monotonicity_constraints_log_level", + "output": "monotonicity constraints log level config.toml: For models that support monotonicity constraints, and if enabled, show automatically determined monotonicity constraints for each feature going into the model based on its correlation with the target. 'low' shows only monotonicity constraint direction. 'medium' shows correlation of positively and negatively constraint features. 'high' shows all correlation values." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting monotonicity_constraints_log_level", + "output": "monotonicity constraints log level config.toml: Control amount of logging when calculating automatic monotonicity constraints (if enabled): For models that support monotonicity constraints, and if enabled, show automatically determined monotonicity constraints for each feature going into the model based on its correlation with the target. 'low' shows only monotonicity constraint direction. 'medium' shows correlation of positively and negatively constraint features. 
'high' shows all correlation values." + }, + { + "prompt_type": "plain", + "instruction": ": What does monotonicity_constraints_correlation_threshold do? : monotonicity constraints correlation threshold config.toml: Threshold, of Pearson product-moment correlation coefficient between numerical or encoded transformedfeature and target, above (below negative for) which will enforce positive (negative) monotonicityfor XGBoostGBM, LightGBM and DecisionTree models.Enabled when interpretability >= monotonicity_constraints_interpretability_switch config toml value.Only if monotonicity_constraints_dict is not provided. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain monotonicity_constraints_correlation_threshold. : monotonicity constraints correlation threshold config.toml: Threshold, of Pearson product-moment correlation coefficient between numerical or encoded transformedfeature and target, above (below negative for) which will enforce positive (negative) monotonicityfor XGBoostGBM, LightGBM and DecisionTree models.Enabled when interpretability >= monotonicity_constraints_interpretability_switch config toml value.Only if monotonicity_constraints_dict is not provided. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Correlation beyond which triggers monotonicity constraints (if enabled): . 
: Set the monotonicity constraints correlation threshold config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity_constraints_correlation_threshold", + "output": "monotonicity constraints correlation threshold config.toml: Threshold, of Pearson product-moment correlation coefficient between numerical or encoded transformedfeature and target, above (below negative for) which will enforce positive (negative) monotonicityfor XGBoostGBM, LightGBM and DecisionTree models.Enabled when interpretability >= monotonicity_constraints_interpretability_switch config toml value.Only if monotonicity_constraints_dict is not provided. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity_constraints_correlation_threshold", + "output": "monotonicity constraints correlation threshold config.toml: Correlation beyond which triggers monotonicity constraints (if enabled): Threshold, of Pearson product-moment correlation coefficient between numerical or encoded transformedfeature and target, above (below negative for) which will enforce positive (negative) monotonicityfor XGBoostGBM, LightGBM and DecisionTree models.Enabled when interpretability >= monotonicity_constraints_interpretability_switch config toml value.Only if monotonicity_constraints_dict is not provided. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity constraints correlation threshold", + "output": "monotonicity constraints correlation threshold config.toml: Correlation beyond which triggers monotonicity constraints (if enabled): Threshold, of Pearson product-moment correlation coefficient between numerical or encoded transformedfeature and target, above (below negative for) which will enforce positive (negative) monotonicityfor XGBoostGBM, LightGBM and DecisionTree models.Enabled when interpretability >= monotonicity_constraints_interpretability_switch config toml value.Only if monotonicity_constraints_dict is not provided. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Correlation beyond which triggers monotonicity constraints (if enabled): ", + "output": "monotonicity constraints correlation threshold config.toml: Correlation beyond which triggers monotonicity constraints (if enabled): Threshold, of Pearson product-moment correlation coefficient between numerical or encoded transformedfeature and target, above (below negative for) which will enforce positive (negative) monotonicityfor XGBoostGBM, LightGBM and DecisionTree models.Enabled when interpretability >= monotonicity_constraints_interpretability_switch config toml value.Only if monotonicity_constraints_dict is not provided. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting monotonicity_constraints_correlation_threshold", + "output": "monotonicity constraints correlation threshold config.toml: Threshold, of Pearson product-moment correlation coefficient between numerical or encoded transformedfeature and target, above (below negative for) which will enforce positive (negative) monotonicityfor XGBoostGBM, LightGBM and DecisionTree models.Enabled when interpretability >= monotonicity_constraints_interpretability_switch config toml value.Only if monotonicity_constraints_dict is not provided. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting monotonicity_constraints_correlation_threshold", + "output": "monotonicity constraints correlation threshold config.toml: Correlation beyond which triggers monotonicity constraints (if enabled): Threshold, of Pearson product-moment correlation coefficient between numerical or encoded transformedfeature and target, above (below negative for) which will enforce positive (negative) monotonicityfor XGBoostGBM, LightGBM and DecisionTree models.Enabled when interpretability >= monotonicity_constraints_interpretability_switch config toml value.Only if monotonicity_constraints_dict is not provided. " + }, + { + "prompt_type": "plain", + "instruction": ": What does monotonicity_constraints_drop_low_correlation_features do? : monotonicity constraints drop low correlation features config.toml: If enabled, only monotonic features with +1/-1 constraints will be passed to the model(s), and featureswithout monotonicity constraints (0, as set by monotonicity_constraints_dict or determined automatically)will be dropped. Otherwise all features will be in the model.Only active when interpretability >= monotonicity_constraints_interpretability_switch ormonotonicity_constraints_dict is provided. 
" + }, + { + "prompt_type": "plain", + "instruction": ": Explain monotonicity_constraints_drop_low_correlation_features. : monotonicity constraints drop low correlation features config.toml: If enabled, only monotonic features with +1/-1 constraints will be passed to the model(s), and featureswithout monotonicity constraints (0, as set by monotonicity_constraints_dict or determined automatically)will be dropped. Otherwise all features will be in the model.Only active when interpretability >= monotonicity_constraints_interpretability_switch ormonotonicity_constraints_dict is provided. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to drop features that have no monotonicity constraint applied (e.g., due to low correlation with target).: . : Set the monotonicity constraints drop low correlation features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity_constraints_drop_low_correlation_features", + "output": "monotonicity constraints drop low correlation features config.toml: If enabled, only monotonic features with +1/-1 constraints will be passed to the model(s), and featureswithout monotonicity constraints (0, as set by monotonicity_constraints_dict or determined automatically)will be dropped. Otherwise all features will be in the model.Only active when interpretability >= monotonicity_constraints_interpretability_switch ormonotonicity_constraints_dict is provided. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity_constraints_drop_low_correlation_features", + "output": "monotonicity constraints drop low correlation features config.toml: Whether to drop features that have no monotonicity constraint applied (e.g., due to low correlation with target).: If enabled, only monotonic features with +1/-1 constraints will be passed to the model(s), and featureswithout monotonicity constraints (0, as set by monotonicity_constraints_dict or determined automatically)will be dropped. Otherwise all features will be in the model.Only active when interpretability >= monotonicity_constraints_interpretability_switch ormonotonicity_constraints_dict is provided. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity constraints drop low correlation features", + "output": "monotonicity constraints drop low correlation features config.toml: Whether to drop features that have no monotonicity constraint applied (e.g., due to low correlation with target).: If enabled, only monotonic features with +1/-1 constraints will be passed to the model(s), and featureswithout monotonicity constraints (0, as set by monotonicity_constraints_dict or determined automatically)will be dropped. Otherwise all features will be in the model.Only active when interpretability >= monotonicity_constraints_interpretability_switch ormonotonicity_constraints_dict is provided. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to drop features that have no monotonicity constraint applied (e.g., due to low correlation with target).: ", + "output": "monotonicity constraints drop low correlation features config.toml: Whether to drop features that have no monotonicity constraint applied (e.g., due to low correlation with target).: If enabled, only monotonic features with +1/-1 constraints will be passed to the model(s), and featureswithout monotonicity constraints (0, as set by monotonicity_constraints_dict or determined automatically)will be dropped. Otherwise all features will be in the model.Only active when interpretability >= monotonicity_constraints_interpretability_switch ormonotonicity_constraints_dict is provided. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting monotonicity_constraints_drop_low_correlation_features", + "output": "monotonicity constraints drop low correlation features config.toml: If enabled, only monotonic features with +1/-1 constraints will be passed to the model(s), and featureswithout monotonicity constraints (0, as set by monotonicity_constraints_dict or determined automatically)will be dropped. Otherwise all features will be in the model.Only active when interpretability >= monotonicity_constraints_interpretability_switch ormonotonicity_constraints_dict is provided. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting monotonicity_constraints_drop_low_correlation_features", + "output": "monotonicity constraints drop low correlation features config.toml: Whether to drop features that have no monotonicity constraint applied (e.g., due to low correlation with target).: If enabled, only monotonic features with +1/-1 constraints will be passed to the model(s), and featureswithout monotonicity constraints (0, as set by monotonicity_constraints_dict or determined automatically)will be dropped. Otherwise all features will be in the model.Only active when interpretability >= monotonicity_constraints_interpretability_switch ormonotonicity_constraints_dict is provided. " + }, + { + "prompt_type": "plain", + "instruction": ": What does monotonicity_constraints_dict do? : monotonicity constraints dict config.toml: Manual override for monotonicity constraints. Mapping of original numeric features to desired constraint(1 for pos, -1 for neg, or 0 to disable. True can be set for automatic handling, False is same as 0).Features that are not listed here will be treated automatically,and so get no constraint (i.e., 0) if interpretability < monotonicity_constraints_interpretability_switchand otherwise the constraint is automatically determined from the correlation between each feature and the target.Example: {'PAY_0': -1, 'PAY_2': -1, 'AGE': -1, 'BILL_AMT1': 1, 'PAY_AMT1': -1} " + }, + { + "prompt_type": "plain", + "instruction": ": Explain monotonicity_constraints_dict. : monotonicity constraints dict config.toml: Manual override for monotonicity constraints. Mapping of original numeric features to desired constraint(1 for pos, -1 for neg, or 0 to disable. 
True can be set for automatic handling, False is same as 0).Features that are not listed here will be treated automatically,and so get no constraint (i.e., 0) if interpretability < monotonicity_constraints_interpretability_switchand otherwise the constraint is automatically determined from the correlation between each feature and the target.Example: {'PAY_0': -1, 'PAY_2': -1, 'AGE': -1, 'BILL_AMT1': 1, 'PAY_AMT1': -1} " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Manual override for monotonicity constraints: . : Set the monotonicity constraints dict config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity_constraints_dict", + "output": "monotonicity constraints dict config.toml: Manual override for monotonicity constraints. Mapping of original numeric features to desired constraint(1 for pos, -1 for neg, or 0 to disable. True can be set for automatic handling, False is same as 0).Features that are not listed here will be treated automatically,and so get no constraint (i.e., 0) if interpretability < monotonicity_constraints_interpretability_switchand otherwise the constraint is automatically determined from the correlation between each feature and the target.Example: {'PAY_0': -1, 'PAY_2': -1, 'AGE': -1, 'BILL_AMT1': 1, 'PAY_AMT1': -1} " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity_constraints_dict", + "output": "monotonicity constraints dict config.toml: Manual override for monotonicity constraints: Manual override for monotonicity constraints. Mapping of original numeric features to desired constraint(1 for pos, -1 for neg, or 0 to disable. 
True can be set for automatic handling, False is same as 0).Features that are not listed here will be treated automatically,and so get no constraint (i.e., 0) if interpretability < monotonicity_constraints_interpretability_switchand otherwise the constraint is automatically determined from the correlation between each feature and the target.Example: {'PAY_0': -1, 'PAY_2': -1, 'AGE': -1, 'BILL_AMT1': 1, 'PAY_AMT1': -1} " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "monotonicity constraints dict", + "output": "monotonicity constraints dict config.toml: Manual override for monotonicity constraints: Manual override for monotonicity constraints. Mapping of original numeric features to desired constraint(1 for pos, -1 for neg, or 0 to disable. True can be set for automatic handling, False is same as 0).Features that are not listed here will be treated automatically,and so get no constraint (i.e., 0) if interpretability < monotonicity_constraints_interpretability_switchand otherwise the constraint is automatically determined from the correlation between each feature and the target.Example: {'PAY_0': -1, 'PAY_2': -1, 'AGE': -1, 'BILL_AMT1': 1, 'PAY_AMT1': -1} " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Manual override for monotonicity constraints: ", + "output": "monotonicity constraints dict config.toml: Manual override for monotonicity constraints: Manual override for monotonicity constraints. Mapping of original numeric features to desired constraint(1 for pos, -1 for neg, or 0 to disable. 
True can be set for automatic handling, False is same as 0).Features that are not listed here will be treated automatically,and so get no constraint (i.e., 0) if interpretability < monotonicity_constraints_interpretability_switchand otherwise the constraint is automatically determined from the correlation between each feature and the target.Example: {'PAY_0': -1, 'PAY_2': -1, 'AGE': -1, 'BILL_AMT1': 1, 'PAY_AMT1': -1} " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting monotonicity_constraints_dict", + "output": "monotonicity constraints dict config.toml: Manual override for monotonicity constraints. Mapping of original numeric features to desired constraint(1 for pos, -1 for neg, or 0 to disable. True can be set for automatic handling, False is same as 0).Features that are not listed here will be treated automatically,and so get no constraint (i.e., 0) if interpretability < monotonicity_constraints_interpretability_switchand otherwise the constraint is automatically determined from the correlation between each feature and the target.Example: {'PAY_0': -1, 'PAY_2': -1, 'AGE': -1, 'BILL_AMT1': 1, 'PAY_AMT1': -1} " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting monotonicity_constraints_dict", + "output": "monotonicity constraints dict config.toml: Manual override for monotonicity constraints: Manual override for monotonicity constraints. Mapping of original numeric features to desired constraint(1 for pos, -1 for neg, or 0 to disable. 
True can be set for automatic handling, False is same as 0).Features that are not listed here will be treated automatically,and so get no constraint (i.e., 0) if interpretability < monotonicity_constraints_interpretability_switchand otherwise the constraint is automatically determined from the correlation between each feature and the target.Example: {'PAY_0': -1, 'PAY_2': -1, 'AGE': -1, 'BILL_AMT1': 1, 'PAY_AMT1': -1} " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_feature_interaction_depth do? : max feature interaction depth config.toml: Exploring feature interactions can be important in gaining better predictive performance.The interaction can take multiple forms (i.e. feature1 + feature2 or feature1 * feature2 + ... featureN)Although certain machine learning algorithms (like tree-based methods) can do well incapturing these interactions as part of their training process, still generating them mayhelp them (or other algorithms) yield better performance.The depth of the interaction level (as in \"up to\" how many features may be combined atonce to create one single feature) can be specified to control the complexity of thefeature engineering process. For transformers that use both numeric and categorical features, this constrainsthe number of each type, not the total number. Higher values might be able to make more predictive modelsat the expense of time (-1 means automatic). " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_feature_interaction_depth. : max feature interaction depth config.toml: Exploring feature interactions can be important in gaining better predictive performance.The interaction can take multiple forms (i.e. feature1 + feature2 or feature1 * feature2 + ... 
featureN)Although certain machine learning algorithms (like tree-based methods) can do well incapturing these interactions as part of their training process, still generating them mayhelp them (or other algorithms) yield better performance.The depth of the interaction level (as in \"up to\" how many features may be combined atonce to create one single feature) can be specified to control the complexity of thefeature engineering process. For transformers that use both numeric and categorical features, this constrainsthe number of each type, not the total number. Higher values might be able to make more predictive modelsat the expense of time (-1 means automatic). " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. feature interaction depth: . : Set the max feature interaction depth config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_feature_interaction_depth", + "output": "max feature interaction depth config.toml: Exploring feature interactions can be important in gaining better predictive performance.The interaction can take multiple forms (i.e. feature1 + feature2 or feature1 * feature2 + ... featureN)Although certain machine learning algorithms (like tree-based methods) can do well incapturing these interactions as part of their training process, still generating them mayhelp them (or other algorithms) yield better performance.The depth of the interaction level (as in \"up to\" how many features may be combined atonce to create one single feature) can be specified to control the complexity of thefeature engineering process. For transformers that use both numeric and categorical features, this constrainsthe number of each type, not the total number. Higher values might be able to make more predictive modelsat the expense of time (-1 means automatic). 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_feature_interaction_depth", + "output": "max feature interaction depth config.toml: Max. feature interaction depth: Exploring feature interactions can be important in gaining better predictive performance.The interaction can take multiple forms (i.e. feature1 + feature2 or feature1 * feature2 + ... featureN)Although certain machine learning algorithms (like tree-based methods) can do well incapturing these interactions as part of their training process, still generating them mayhelp them (or other algorithms) yield better performance.The depth of the interaction level (as in \"up to\" how many features may be combined atonce to create one single feature) can be specified to control the complexity of thefeature engineering process. For transformers that use both numeric and categorical features, this constrainsthe number of each type, not the total number. Higher values might be able to make more predictive modelsat the expense of time (-1 means automatic). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max feature interaction depth", + "output": "max feature interaction depth config.toml: Max. feature interaction depth: Exploring feature interactions can be important in gaining better predictive performance.The interaction can take multiple forms (i.e. feature1 + feature2 or feature1 * feature2 + ... featureN)Although certain machine learning algorithms (like tree-based methods) can do well incapturing these interactions as part of their training process, still generating them mayhelp them (or other algorithms) yield better performance.The depth of the interaction level (as in \"up to\" how many features may be combined atonce to create one single feature) can be specified to control the complexity of thefeature engineering process. 
For transformers that use both numeric and categorical features, this constrainsthe number of each type, not the total number. Higher values might be able to make more predictive modelsat the expense of time (-1 means automatic). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. feature interaction depth: ", + "output": "max feature interaction depth config.toml: Max. feature interaction depth: Exploring feature interactions can be important in gaining better predictive performance.The interaction can take multiple forms (i.e. feature1 + feature2 or feature1 * feature2 + ... featureN)Although certain machine learning algorithms (like tree-based methods) can do well incapturing these interactions as part of their training process, still generating them mayhelp them (or other algorithms) yield better performance.The depth of the interaction level (as in \"up to\" how many features may be combined atonce to create one single feature) can be specified to control the complexity of thefeature engineering process. For transformers that use both numeric and categorical features, this constrainsthe number of each type, not the total number. Higher values might be able to make more predictive modelsat the expense of time (-1 means automatic). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_feature_interaction_depth", + "output": "max feature interaction depth config.toml: Exploring feature interactions can be important in gaining better predictive performance.The interaction can take multiple forms (i.e. feature1 + feature2 or feature1 * feature2 + ... 
featureN)Although certain machine learning algorithms (like tree-based methods) can do well incapturing these interactions as part of their training process, still generating them mayhelp them (or other algorithms) yield better performance.The depth of the interaction level (as in \"up to\" how many features may be combined atonce to create one single feature) can be specified to control the complexity of thefeature engineering process. For transformers that use both numeric and categorical features, this constrainsthe number of each type, not the total number. Higher values might be able to make more predictive modelsat the expense of time (-1 means automatic). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_feature_interaction_depth", + "output": "max feature interaction depth config.toml: Max. feature interaction depth: Exploring feature interactions can be important in gaining better predictive performance.The interaction can take multiple forms (i.e. feature1 + feature2 or feature1 * feature2 + ... featureN)Although certain machine learning algorithms (like tree-based methods) can do well incapturing these interactions as part of their training process, still generating them mayhelp them (or other algorithms) yield better performance.The depth of the interaction level (as in \"up to\" how many features may be combined atonce to create one single feature) can be specified to control the complexity of thefeature engineering process. For transformers that use both numeric and categorical features, this constrainsthe number of each type, not the total number. Higher values might be able to make more predictive modelsat the expense of time (-1 means automatic). " + }, + { + "prompt_type": "plain", + "instruction": ": What does fixed_feature_interaction_depth do? 
: fixed feature interaction depth config.toml: Instead of sampling from min to max (up to max_feature_interaction_depth unless all specified)columns allowed for each transformer (0), choose fixed non-zero number of columns to use.Can make same as number of columns to use all columns for each transformers if allowed by each transformer.-n can be chosen to do 50/50 sample and fixed of n features. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain fixed_feature_interaction_depth. : fixed feature interaction depth config.toml: Instead of sampling from min to max (up to max_feature_interaction_depth unless all specified)columns allowed for each transformer (0), choose fixed non-zero number of columns to use.Can make same as number of columns to use all columns for each transformers if allowed by each transformer.-n can be chosen to do 50/50 sample and fixed of n features. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Fixed feature interaction depth: . : Set the fixed feature interaction depth config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_feature_interaction_depth", + "output": "fixed feature interaction depth config.toml: Instead of sampling from min to max (up to max_feature_interaction_depth unless all specified)columns allowed for each transformer (0), choose fixed non-zero number of columns to use.Can make same as number of columns to use all columns for each transformers if allowed by each transformer.-n can be chosen to do 50/50 sample and fixed of n features. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_feature_interaction_depth", + "output": "fixed feature interaction depth config.toml: Fixed feature interaction depth: Instead of sampling from min to max (up to max_feature_interaction_depth unless all specified)columns allowed for each transformer (0), choose fixed non-zero number of columns to use.Can make same as number of columns to use all columns for each transformers if allowed by each transformer.-n can be chosen to do 50/50 sample and fixed of n features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed feature interaction depth", + "output": "fixed feature interaction depth config.toml: Fixed feature interaction depth: Instead of sampling from min to max (up to max_feature_interaction_depth unless all specified)columns allowed for each transformer (0), choose fixed non-zero number of columns to use.Can make same as number of columns to use all columns for each transformers if allowed by each transformer.-n can be chosen to do 50/50 sample and fixed of n features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Fixed feature interaction depth: ", + "output": "fixed feature interaction depth config.toml: Fixed feature interaction depth: Instead of sampling from min to max (up to max_feature_interaction_depth unless all specified)columns allowed for each transformer (0), choose fixed non-zero number of columns to use.Can make same as number of columns to use all columns for each transformers if allowed by each transformer.-n can be chosen to do 50/50 sample and fixed of n features. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fixed_feature_interaction_depth", + "output": "fixed feature interaction depth config.toml: Instead of sampling from min to max (up to max_feature_interaction_depth unless all specified)columns allowed for each transformer (0), choose fixed non-zero number of columns to use.Can make same as number of columns to use all columns for each transformers if allowed by each transformer.-n can be chosen to do 50/50 sample and fixed of n features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fixed_feature_interaction_depth", + "output": "fixed feature interaction depth config.toml: Fixed feature interaction depth: Instead of sampling from min to max (up to max_feature_interaction_depth unless all specified)columns allowed for each transformer (0), choose fixed non-zero number of columns to use.Can make same as number of columns to use all columns for each transformers if allowed by each transformer.-n can be chosen to do 50/50 sample and fixed of n features. " + }, + { + "prompt_type": "plain", + "instruction": ": What does tune_parameters_accuracy_switch do? : tune parameters accuracy switch config.toml: Accuracy setting equal and above which enables tuning of model parameters Only applicable if parameter_tuning_num_models=-1 (auto)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain tune_parameters_accuracy_switch. 
: tune parameters accuracy switch config.toml: Accuracy setting equal and above which enables tuning of model parameters Only applicable if parameter_tuning_num_models=-1 (auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tune_parameters_accuracy_switch", + "output": "tune parameters accuracy switch config.toml: Accuracy setting equal and above which enables tuning of model parameters Only applicable if parameter_tuning_num_models=-1 (auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tune_parameters_accuracy_switch", + "output": "tune parameters accuracy switch config.toml: Accuracy setting equal and above which enables tuning of model parameters Only applicable if parameter_tuning_num_models=-1 (auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tune parameters accuracy switch", + "output": "tune parameters accuracy switch config.toml: Accuracy setting equal and above which enables tuning of model parameters Only applicable if parameter_tuning_num_models=-1 (auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tune parameters accuracy switch config.toml: Accuracy setting equal and above which enables tuning of model parameters Only applicable if parameter_tuning_num_models=-1 (auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tune_parameters_accuracy_switch", + "output": "tune parameters accuracy switch config.toml: Accuracy setting equal and above which enables tuning of model parameters Only applicable if parameter_tuning_num_models=-1 (auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting 
tune_parameters_accuracy_switch", + "output": "tune parameters accuracy switch config.toml: Accuracy setting equal and above which enables tuning of model parameters. Only applicable if parameter_tuning_num_models=-1 (auto)" + }, + { + "prompt_type": "plain", + "instruction": ": What does tune_target_transform_accuracy_switch do? : tune target transform accuracy switch config.toml: Accuracy setting equal and above which enables tuning of target transform for regression. This is useful for time series when instead of predicting the actual target value, it might be better to predict a transformed target variable like sqrt(target) or log(target) as a means to control for outliers." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tune_target_transform_accuracy_switch. : tune target transform accuracy switch config.toml: Accuracy setting equal and above which enables tuning of target transform for regression. This is useful for time series when instead of predicting the actual target value, it might be better to predict a transformed target variable like sqrt(target) or log(target) as a means to control for outliers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tune_target_transform_accuracy_switch", + "output": "tune target transform accuracy switch config.toml: Accuracy setting equal and above which enables tuning of target transform for regression. This is useful for time series when instead of predicting the actual target value, it might be better to predict a transformed target variable like sqrt(target) or log(target) as a means to control for outliers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tune_target_transform_accuracy_switch", + "output": "tune target transform accuracy switch config.toml: Accuracy setting equal and above which enables tuning of target transform for regression. 
This is useful for time series when instead of predicting the actual target value, it might be better to predict a transformed target variable like sqrt(target) or log(target) as a means to control for outliers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tune target transform accuracy switch", + "output": "tune target transform accuracy switch config.toml: Accuracy setting equal and above which enables tuning of target transform for regression. This is useful for time series when instead of predicting the actual target value, it might be better to predict a transformed target variable like sqrt(target) or log(target) as a means to control for outliers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tune target transform accuracy switch config.toml: Accuracy setting equal and above which enables tuning of target transform for regression. This is useful for time series when instead of predicting the actual target value, it might be better to predict a transformed target variable like sqrt(target) or log(target) as a means to control for outliers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tune_target_transform_accuracy_switch", + "output": "tune target transform accuracy switch config.toml: Accuracy setting equal and above which enables tuning of target transform for regression. This is useful for time series when instead of predicting the actual target value, it might be better to predict a transformed target variable like sqrt(target) or log(target) as a means to control for outliers." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tune_target_transform_accuracy_switch", + "output": "tune target transform accuracy switch config.toml: Accuracy setting equal and above which enables tuning of target transform for regression. This is useful for time series when instead of predicting the actual target value, it might be better to predict a transformed target variable like sqrt(target) or log(target) as a means to control for outliers." + }, + { + "prompt_type": "plain", + "instruction": ": What does target_transformer do? : target transformer config.toml: Select a target transformation for regression problems. Must be one of: ['auto','identity', 'identity_noclip', 'center', 'standardize', 'unit_box', 'log', 'log_noclip', 'square','sqrt', 'double_sqrt', 'inverse', 'anscombe', 'logit', 'sigmoid'].If set to 'auto', will automatically pick the best target transformer (if accuracy is set totune_target_transform_accuracy_switch or larger, considering interpretability level of each target transformer),otherwise will fall back to 'identity_noclip' (easiest to interpret, Shapley values are in original space, etc.).All transformers except for 'center', 'standardize', 'identity_noclip' and 'log_noclip' perform clippingto constrain the predictions to the domain of the target in the training data. Use 'center', 'standardize','identity_noclip' or 'log_noclip' to disable clipping and to allow predictions outside of the target domain observed inthe training data (for parametric models or custom models that support extrapolation). " + }, + { + "prompt_type": "plain", + "instruction": ": Explain target_transformer. : target transformer config.toml: Select a target transformation for regression problems. 
Must be one of: ['auto','identity', 'identity_noclip', 'center', 'standardize', 'unit_box', 'log', 'log_noclip', 'square','sqrt', 'double_sqrt', 'inverse', 'anscombe', 'logit', 'sigmoid'].If set to 'auto', will automatically pick the best target transformer (if accuracy is set totune_target_transform_accuracy_switch or larger, considering interpretability level of each target transformer),otherwise will fall back to 'identity_noclip' (easiest to interpret, Shapley values are in original space, etc.).All transformers except for 'center', 'standardize', 'identity_noclip' and 'log_noclip' perform clippingto constrain the predictions to the domain of the target in the training data. Use 'center', 'standardize','identity_noclip' or 'log_noclip' to disable clipping and to allow predictions outside of the target domain observed inthe training data (for parametric models or custom models that support extrapolation). " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Select target transformation of the target for regression problems: . : Set the target transformer config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "target_transformer", + "output": "target transformer config.toml: Select a target transformation for regression problems. 
Must be one of: ['auto','identity', 'identity_noclip', 'center', 'standardize', 'unit_box', 'log', 'log_noclip', 'square','sqrt', 'double_sqrt', 'inverse', 'anscombe', 'logit', 'sigmoid'].If set to 'auto', will automatically pick the best target transformer (if accuracy is set totune_target_transform_accuracy_switch or larger, considering interpretability level of each target transformer),otherwise will fall back to 'identity_noclip' (easiest to interpret, Shapley values are in original space, etc.).All transformers except for 'center', 'standardize', 'identity_noclip' and 'log_noclip' perform clippingto constrain the predictions to the domain of the target in the training data. Use 'center', 'standardize','identity_noclip' or 'log_noclip' to disable clipping and to allow predictions outside of the target domain observed inthe training data (for parametric models or custom models that support extrapolation). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "target_transformer", + "output": "target transformer config.toml: Select target transformation of the target for regression problems: Select a target transformation for regression problems. Must be one of: ['auto','identity', 'identity_noclip', 'center', 'standardize', 'unit_box', 'log', 'log_noclip', 'square','sqrt', 'double_sqrt', 'inverse', 'anscombe', 'logit', 'sigmoid'].If set to 'auto', will automatically pick the best target transformer (if accuracy is set totune_target_transform_accuracy_switch or larger, considering interpretability level of each target transformer),otherwise will fall back to 'identity_noclip' (easiest to interpret, Shapley values are in original space, etc.).All transformers except for 'center', 'standardize', 'identity_noclip' and 'log_noclip' perform clippingto constrain the predictions to the domain of the target in the training data. 
Use 'center', 'standardize','identity_noclip' or 'log_noclip' to disable clipping and to allow predictions outside of the target domain observed inthe training data (for parametric models or custom models that support extrapolation). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "target transformer", + "output": "target transformer config.toml: Select target transformation of the target for regression problems: Select a target transformation for regression problems. Must be one of: ['auto','identity', 'identity_noclip', 'center', 'standardize', 'unit_box', 'log', 'log_noclip', 'square','sqrt', 'double_sqrt', 'inverse', 'anscombe', 'logit', 'sigmoid'].If set to 'auto', will automatically pick the best target transformer (if accuracy is set totune_target_transform_accuracy_switch or larger, considering interpretability level of each target transformer),otherwise will fall back to 'identity_noclip' (easiest to interpret, Shapley values are in original space, etc.).All transformers except for 'center', 'standardize', 'identity_noclip' and 'log_noclip' perform clippingto constrain the predictions to the domain of the target in the training data. Use 'center', 'standardize','identity_noclip' or 'log_noclip' to disable clipping and to allow predictions outside of the target domain observed inthe training data (for parametric models or custom models that support extrapolation). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Select target transformation of the target for regression problems: ", + "output": "target transformer config.toml: Select target transformation of the target for regression problems: Select a target transformation for regression problems. 
Must be one of: ['auto','identity', 'identity_noclip', 'center', 'standardize', 'unit_box', 'log', 'log_noclip', 'square','sqrt', 'double_sqrt', 'inverse', 'anscombe', 'logit', 'sigmoid'].If set to 'auto', will automatically pick the best target transformer (if accuracy is set totune_target_transform_accuracy_switch or larger, considering interpretability level of each target transformer),otherwise will fall back to 'identity_noclip' (easiest to interpret, Shapley values are in original space, etc.).All transformers except for 'center', 'standardize', 'identity_noclip' and 'log_noclip' perform clippingto constrain the predictions to the domain of the target in the training data. Use 'center', 'standardize','identity_noclip' or 'log_noclip' to disable clipping and to allow predictions outside of the target domain observed inthe training data (for parametric models or custom models that support extrapolation). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting target_transformer", + "output": "target transformer config.toml: Select a target transformation for regression problems. Must be one of: ['auto','identity', 'identity_noclip', 'center', 'standardize', 'unit_box', 'log', 'log_noclip', 'square','sqrt', 'double_sqrt', 'inverse', 'anscombe', 'logit', 'sigmoid'].If set to 'auto', will automatically pick the best target transformer (if accuracy is set totune_target_transform_accuracy_switch or larger, considering interpretability level of each target transformer),otherwise will fall back to 'identity_noclip' (easiest to interpret, Shapley values are in original space, etc.).All transformers except for 'center', 'standardize', 'identity_noclip' and 'log_noclip' perform clippingto constrain the predictions to the domain of the target in the training data. 
Use 'center', 'standardize','identity_noclip' or 'log_noclip' to disable clipping and to allow predictions outside of the target domain observed inthe training data (for parametric models or custom models that support extrapolation). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting target_transformer", + "output": "target transformer config.toml: Select target transformation of the target for regression problems: Select a target transformation for regression problems. Must be one of: ['auto','identity', 'identity_noclip', 'center', 'standardize', 'unit_box', 'log', 'log_noclip', 'square','sqrt', 'double_sqrt', 'inverse', 'anscombe', 'logit', 'sigmoid'].If set to 'auto', will automatically pick the best target transformer (if accuracy is set totune_target_transform_accuracy_switch or larger, considering interpretability level of each target transformer),otherwise will fall back to 'identity_noclip' (easiest to interpret, Shapley values are in original space, etc.).All transformers except for 'center', 'standardize', 'identity_noclip' and 'log_noclip' perform clippingto constrain the predictions to the domain of the target in the training data. Use 'center', 'standardize','identity_noclip' or 'log_noclip' to disable clipping and to allow predictions outside of the target domain observed inthe training data (for parametric models or custom models that support extrapolation). " + }, + { + "prompt_type": "plain", + "instruction": ": What does target_transformer_tuning_choices do? : target transformer tuning choices config.toml: Select list of target transformers to use for tuning. Only for target_transformer='auto' and accuracy >= tune_target_transform_accuracy_switch. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain target_transformer_tuning_choices. : target transformer tuning choices config.toml: Select list of target transformers to use for tuning. 
Only for target_transformer='auto' and accuracy >= tune_target_transform_accuracy_switch. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Select all allowed target transformations of the target for regression problems when doing target transformer tuning: . : Set the target transformer tuning choices config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "target_transformer_tuning_choices", + "output": "target transformer tuning choices config.toml: Select list of target transformers to use for tuning. Only for target_transformer='auto' and accuracy >= tune_target_transform_accuracy_switch. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "target_transformer_tuning_choices", + "output": "target transformer tuning choices config.toml: Select all allowed target transformations of the target for regression problems when doing target transformer tuning: Select list of target transformers to use for tuning. Only for target_transformer='auto' and accuracy >= tune_target_transform_accuracy_switch. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "target transformer tuning choices", + "output": "target transformer tuning choices config.toml: Select all allowed target transformations of the target for regression problems when doing target transformer tuning: Select list of target transformers to use for tuning. Only for target_transformer='auto' and accuracy >= tune_target_transform_accuracy_switch. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Select all allowed target transformations of the target for regression problems when doing target transformer tuning: ", + "output": "target transformer tuning choices config.toml: Select all allowed target transformations of the target for regression problems when doing target transformer tuning: Select list of target transformers to use for tuning. Only for target_transformer='auto' and accuracy >= tune_target_transform_accuracy_switch. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting target_transformer_tuning_choices", + "output": "target transformer tuning choices config.toml: Select list of target transformers to use for tuning. Only for target_transformer='auto' and accuracy >= tune_target_transform_accuracy_switch. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting target_transformer_tuning_choices", + "output": "target transformer tuning choices config.toml: Select all allowed target transformations of the target for regression problems when doing target transformer tuning: Select list of target transformers to use for tuning. Only for target_transformer='auto' and accuracy >= tune_target_transform_accuracy_switch. " + }, + { + "prompt_type": "plain", + "instruction": ": What does tournament_style do? : tournament style config.toml: Tournament style (method to decide which models are best at each iteration)'auto' : Choose based upon accuracy and interpretability'uniform' : all individuals in population compete to win as best (can lead to all, e.g. 
LightGBM models in final ensemble, which may not improve ensemble performance due to lack of diversity)'model' : individuals with same model type compete (good if multiple models do well but some models that do not do as well still contribute to improving ensemble)'feature' : individuals with similar feature types compete (good if target encoding, frequency encoding, and other feature sets lead to good results)'fullstack' : Choose among optimal model and feature types'model' and 'feature' styles preserve at least one winner for each type (and so 2 total indivs of each type after mutation)For each case, a round robin approach is used to choose best scores among type of models to choose from.If enable_genetic_algorithm=='Optuna', then every individual is self-mutated without any tournamentduring the genetic algorithm. The tournament is only used to prune-down individuals for, e.g.,tuning -> evolution and evolution -> final model. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain tournament_style. : tournament style config.toml: Tournament style (method to decide which models are best at each iteration)'auto' : Choose based upon accuracy and interpretability'uniform' : all individuals in population compete to win as best (can lead to all, e.g. 
LightGBM models in final ensemble, which may not improve ensemble performance due to lack of diversity)'model' : individuals with same model type compete (good if multiple models do well but some models that do not do as well still contribute to improving ensemble)'feature' : individuals with similar feature types compete (good if target encoding, frequency encoding, and other feature sets lead to good results)'fullstack' : Choose among optimal model and feature types'model' and 'feature' styles preserve at least one winner for each type (and so 2 total indivs of each type after mutation)For each case, a round robin approach is used to choose best scores among type of models to choose from.If enable_genetic_algorithm=='Optuna', then every individual is self-mutated without any tournamentduring the genetic algorithm. The tournament is only used to prune-down individuals for, e.g.,tuning -> evolution and evolution -> final model. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Tournament model for genetic algorithm: . : Set the tournament style config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_style", + "output": "tournament style config.toml: Tournament style (method to decide which models are best at each iteration)'auto' : Choose based upon accuracy and interpretability'uniform' : all individuals in population compete to win as best (can lead to all, e.g. 
LightGBM models in final ensemble, which may not improve ensemble performance due to lack of diversity)'model' : individuals with same model type compete (good if multiple models do well but some models that do not do as well still contribute to improving ensemble)'feature' : individuals with similar feature types compete (good if target encoding, frequency encoding, and other feature sets lead to good results)'fullstack' : Choose among optimal model and feature types'model' and 'feature' styles preserve at least one winner for each type (and so 2 total indivs of each type after mutation)For each case, a round robin approach is used to choose best scores among type of models to choose from.If enable_genetic_algorithm=='Optuna', then every individual is self-mutated without any tournamentduring the genetic algorithm. The tournament is only used to prune-down individuals for, e.g.,tuning -> evolution and evolution -> final model. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_style", + "output": "tournament style config.toml: Tournament model for genetic algorithm: Tournament style (method to decide which models are best at each iteration)'auto' : Choose based upon accuracy and interpretability'uniform' : all individuals in population compete to win as best (can lead to all, e.g. 
LightGBM models in final ensemble, which may not improve ensemble performance due to lack of diversity)'model' : individuals with same model type compete (good if multiple models do well but some models that do not do as well still contribute to improving ensemble)'feature' : individuals with similar feature types compete (good if target encoding, frequency encoding, and other feature sets lead to good results)'fullstack' : Choose among optimal model and feature types'model' and 'feature' styles preserve at least one winner for each type (and so 2 total indivs of each type after mutation)For each case, a round robin approach is used to choose best scores among type of models to choose from.If enable_genetic_algorithm=='Optuna', then every individual is self-mutated without any tournamentduring the genetic algorithm. The tournament is only used to prune-down individuals for, e.g.,tuning -> evolution and evolution -> final model. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament style", + "output": "tournament style config.toml: Tournament model for genetic algorithm: Tournament style (method to decide which models are best at each iteration)'auto' : Choose based upon accuracy and interpretability'uniform' : all individuals in population compete to win as best (can lead to all, e.g. 
LightGBM models in final ensemble, which may not improve ensemble performance due to lack of diversity)'model' : individuals with same model type compete (good if multiple models do well but some models that do not do as well still contribute to improving ensemble)'feature' : individuals with similar feature types compete (good if target encoding, frequency encoding, and other feature sets lead to good results)'fullstack' : Choose among optimal model and feature types'model' and 'feature' styles preserve at least one winner for each type (and so 2 total indivs of each type after mutation)For each case, a round robin approach is used to choose best scores among type of models to choose from.If enable_genetic_algorithm=='Optuna', then every individual is self-mutated without any tournamentduring the genetic algorithm. The tournament is only used to prune-down individuals for, e.g.,tuning -> evolution and evolution -> final model. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Tournament model for genetic algorithm: ", + "output": "tournament style config.toml: Tournament model for genetic algorithm: Tournament style (method to decide which models are best at each iteration)'auto' : Choose based upon accuracy and interpretability'uniform' : all individuals in population compete to win as best (can lead to all, e.g. 
LightGBM models in final ensemble, which may not improve ensemble performance due to lack of diversity)'model' : individuals with same model type compete (good if multiple models do well but some models that do not do as well still contribute to improving ensemble)'feature' : individuals with similar feature types compete (good if target encoding, frequency encoding, and other feature sets lead to good results)'fullstack' : Choose among optimal model and feature types'model' and 'feature' styles preserve at least one winner for each type (and so 2 total indivs of each type after mutation)For each case, a round robin approach is used to choose best scores among type of models to choose from.If enable_genetic_algorithm=='Optuna', then every individual is self-mutated without any tournamentduring the genetic algorithm. The tournament is only used to prune-down individuals for, e.g.,tuning -> evolution and evolution -> final model. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tournament_style", + "output": "tournament style config.toml: Tournament style (method to decide which models are best at each iteration)'auto' : Choose based upon accuracy and interpretability'uniform' : all individuals in population compete to win as best (can lead to all, e.g. 
LightGBM models in final ensemble, which may not improve ensemble performance due to lack of diversity)'model' : individuals with same model type compete (good if multiple models do well but some models that do not do as well still contribute to improving ensemble)'feature' : individuals with similar feature types compete (good if target encoding, frequency encoding, and other feature sets lead to good results)'fullstack' : Choose among optimal model and feature types'model' and 'feature' styles preserve at least one winner for each type (and so 2 total indivs of each type after mutation)For each case, a round robin approach is used to choose best scores among type of models to choose from.If enable_genetic_algorithm=='Optuna', then every individual is self-mutated without any tournamentduring the genetic algorithm. The tournament is only used to prune-down individuals for, e.g.,tuning -> evolution and evolution -> final model. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tournament_style", + "output": "tournament style config.toml: Tournament model for genetic algorithm: Tournament style (method to decide which models are best at each iteration)'auto' : Choose based upon accuracy and interpretability'uniform' : all individuals in population compete to win as best (can lead to all, e.g. 
LightGBM models in final ensemble, which may not improve ensemble performance due to lack of diversity)'model' : individuals with same model type compete (good if multiple models do well but some models that do not do as well still contribute to improving ensemble)'feature' : individuals with similar feature types compete (good if target encoding, frequency encoding, and other feature sets lead to good results)'fullstack' : Choose among optimal model and feature types'model' and 'feature' styles preserve at least one winner for each type (and so 2 total indivs of each type after mutation)For each case, a round robin approach is used to choose best scores among type of models to choose from.If enable_genetic_algorithm=='Optuna', then every individual is self-mutated without any tournamentduring the genetic algorithm. The tournament is only used to prune-down individuals for, e.g.,tuning -> evolution and evolution -> final model. " + }, + { + "prompt_type": "plain", + "instruction": ": What does tournament_uniform_style_interpretability_switch do? : tournament uniform style interpretability switch config.toml: Interpretability above which will use 'uniform' tournament style" + }, + { + "prompt_type": "plain", + "instruction": ": Explain tournament_uniform_style_interpretability_switch. 
: tournament uniform style interpretability switch config.toml: Interpretability above which will use 'uniform' tournament style" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_uniform_style_interpretability_switch", + "output": "tournament uniform style interpretability switch config.toml: Interpretability above which will use 'uniform' tournament style" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_uniform_style_interpretability_switch", + "output": "tournament uniform style interpretability switch config.toml: Interpretability above which will use 'uniform' tournament style" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament uniform style interpretability switch", + "output": "tournament uniform style interpretability switch config.toml: Interpretability above which will use 'uniform' tournament style" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tournament uniform style interpretability switch config.toml: Interpretability above which will use 'uniform' tournament style" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tournament_uniform_style_interpretability_switch", + "output": "tournament uniform style interpretability switch config.toml: Interpretability above which will use 'uniform' tournament style" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tournament_uniform_style_interpretability_switch", + "output": "tournament uniform style interpretability switch config.toml: Interpretability above which will use 'uniform' tournament style" + }, + { + "prompt_type": "plain", + 
"instruction": ": What does tournament_uniform_style_accuracy_switch do? : tournament uniform style accuracy switch config.toml: Accuracy below which will use uniform style if tournament_style = 'auto' (regardless of other accuracy tournament style switch values)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain tournament_uniform_style_accuracy_switch. : tournament uniform style accuracy switch config.toml: Accuracy below which will use uniform style if tournament_style = 'auto' (regardless of other accuracy tournament style switch values)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_uniform_style_accuracy_switch", + "output": "tournament uniform style accuracy switch config.toml: Accuracy below which will use uniform style if tournament_style = 'auto' (regardless of other accuracy tournament style switch values)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_uniform_style_accuracy_switch", + "output": "tournament uniform style accuracy switch config.toml: Accuracy below which will use uniform style if tournament_style = 'auto' (regardless of other accuracy tournament style switch values)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament uniform style accuracy switch", + "output": "tournament uniform style accuracy switch config.toml: Accuracy below which will use uniform style if tournament_style = 'auto' (regardless of other accuracy tournament style switch values)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tournament uniform style accuracy switch config.toml: Accuracy below which will use uniform style if tournament_style = 'auto' (regardless of other accuracy tournament style 
switch values)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tournament_uniform_style_accuracy_switch", + "output": "tournament uniform style accuracy switch config.toml: Accuracy below which will use uniform style if tournament_style = 'auto' (regardless of other accuracy tournament style switch values)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tournament_uniform_style_accuracy_switch", + "output": "tournament uniform style accuracy switch config.toml: Accuracy below which will use uniform style if tournament_style = 'auto' (regardless of other accuracy tournament style switch values)" + }, + { + "prompt_type": "plain", + "instruction": ": What does tournament_model_style_accuracy_switch do? : tournament model style accuracy switch config.toml: Accuracy equal and above which uses model style if tournament_style = 'auto' " + }, + { + "prompt_type": "plain", + "instruction": ": Explain tournament_model_style_accuracy_switch. 
: tournament model style accuracy switch config.toml: Accuracy equal and above which uses model style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_model_style_accuracy_switch", + "output": "tournament model style accuracy switch config.toml: Accuracy equal and above which uses model style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_model_style_accuracy_switch", + "output": "tournament model style accuracy switch config.toml: Accuracy equal and above which uses model style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament model style accuracy switch", + "output": "tournament model style accuracy switch config.toml: Accuracy equal and above which uses model style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tournament model style accuracy switch config.toml: Accuracy equal and above which uses model style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tournament_model_style_accuracy_switch", + "output": "tournament model style accuracy switch config.toml: Accuracy equal and above which uses model style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tournament_model_style_accuracy_switch", + "output": "tournament model style accuracy switch config.toml: Accuracy equal and above which uses model style if tournament_style = 'auto' " + }, + { + "prompt_type": "plain", + "instruction": ": What does 
tournament_feature_style_accuracy_switch do? : tournament feature style accuracy switch config.toml: Accuracy equal and above which uses feature style if tournament_style = 'auto' " + }, + { + "prompt_type": "plain", + "instruction": ": Explain tournament_feature_style_accuracy_switch. : tournament feature style accuracy switch config.toml: Accuracy equal and above which uses feature style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_feature_style_accuracy_switch", + "output": "tournament feature style accuracy switch config.toml: Accuracy equal and above which uses feature style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_feature_style_accuracy_switch", + "output": "tournament feature style accuracy switch config.toml: Accuracy equal and above which uses feature style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament feature style accuracy switch", + "output": "tournament feature style accuracy switch config.toml: Accuracy equal and above which uses feature style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tournament feature style accuracy switch config.toml: Accuracy equal and above which uses feature style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tournament_feature_style_accuracy_switch", + "output": "tournament feature style accuracy switch config.toml: Accuracy equal and above which uses feature style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + 
"instruction": "Provide a detailed explanation of the expert setting tournament_feature_style_accuracy_switch", + "output": "tournament feature style accuracy switch config.toml: Accuracy equal and above which uses feature style if tournament_style = 'auto' " + }, + { + "prompt_type": "plain", + "instruction": ": What does tournament_fullstack_style_accuracy_switch do? : tournament fullstack style accuracy switch config.toml: Accuracy equal and above which uses fullstack style if tournament_style = 'auto' " + }, + { + "prompt_type": "plain", + "instruction": ": Explain tournament_fullstack_style_accuracy_switch. : tournament fullstack style accuracy switch config.toml: Accuracy equal and above which uses fullstack style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_fullstack_style_accuracy_switch", + "output": "tournament fullstack style accuracy switch config.toml: Accuracy equal and above which uses fullstack style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_fullstack_style_accuracy_switch", + "output": "tournament fullstack style accuracy switch config.toml: Accuracy equal and above which uses fullstack style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament fullstack style accuracy switch", + "output": "tournament fullstack style accuracy switch config.toml: Accuracy equal and above which uses fullstack style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tournament fullstack style accuracy switch config.toml: Accuracy equal and above which uses fullstack style if tournament_style 
= 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tournament_fullstack_style_accuracy_switch", + "output": "tournament fullstack style accuracy switch config.toml: Accuracy equal and above which uses fullstack style if tournament_style = 'auto' " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tournament_fullstack_style_accuracy_switch", + "output": "tournament fullstack style accuracy switch config.toml: Accuracy equal and above which uses fullstack style if tournament_style = 'auto' " + }, + { + "prompt_type": "plain", + "instruction": ": What does tournament_use_feature_penalized_score do? : tournament use feature penalized score config.toml: Whether to use penalized score for GA tournament or actual score" + }, + { + "prompt_type": "plain", + "instruction": ": Explain tournament_use_feature_penalized_score. : tournament use feature penalized score config.toml: Whether to use penalized score for GA tournament or actual score" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_use_feature_penalized_score", + "output": "tournament use feature penalized score config.toml: Whether to use penalized score for GA tournament or actual score" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_use_feature_penalized_score", + "output": "tournament use feature penalized score config.toml: Whether to use penalized score for GA tournament or actual score" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament use feature penalized score", + "output": "tournament use feature penalized score config.toml: Whether to use penalized score for GA tournament or actual score" + }, + { + "prompt_type": 
"human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tournament use feature penalized score config.toml: Whether to use penalized score for GA tournament or actual score" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tournament_use_feature_penalized_score", + "output": "tournament use feature penalized score config.toml: Whether to use penalized score for GA tournament or actual score" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tournament_use_feature_penalized_score", + "output": "tournament use feature penalized score config.toml: Whether to use penalized score for GA tournament or actual score" + }, + { + "prompt_type": "plain", + "instruction": ": What does tournament_keep_poor_scores_for_small_data do? : tournament keep poor scores for small data config.toml: Whether to keep poor scores for small data (<10k rows) in case exploration will find good model. sets tournament_remove_poor_scores_before_evolution_model_factor=1.1 tournament_remove_worse_than_constant_before_evolution=false tournament_keep_absolute_ok_scores_before_evolution_model_factor=1.1 tournament_remove_poor_scores_before_final_model_factor=1.1 tournament_remove_worse_than_constant_before_final_model=true" + }, + { + "prompt_type": "plain", + "instruction": ": Explain tournament_keep_poor_scores_for_small_data. : tournament keep poor scores for small data config.toml: Whether to keep poor scores for small data (<10k rows) in case exploration will find good model. 
sets tournament_remove_poor_scores_before_evolution_model_factor=1.1 tournament_remove_worse_than_constant_before_evolution=false tournament_keep_absolute_ok_scores_before_evolution_model_factor=1.1 tournament_remove_poor_scores_before_final_model_factor=1.1 tournament_remove_worse_than_constant_before_final_model=true" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_keep_poor_scores_for_small_data", + "output": "tournament keep poor scores for small data config.toml: Whether to keep poor scores for small data (<10k rows) in case exploration will find good model. sets tournament_remove_poor_scores_before_evolution_model_factor=1.1 tournament_remove_worse_than_constant_before_evolution=false tournament_keep_absolute_ok_scores_before_evolution_model_factor=1.1 tournament_remove_poor_scores_before_final_model_factor=1.1 tournament_remove_worse_than_constant_before_final_model=true" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_keep_poor_scores_for_small_data", + "output": "tournament keep poor scores for small data config.toml: Whether to keep poor scores for small data (<10k rows) in case exploration will find good model. sets tournament_remove_poor_scores_before_evolution_model_factor=1.1 tournament_remove_worse_than_constant_before_evolution=false tournament_keep_absolute_ok_scores_before_evolution_model_factor=1.1 tournament_remove_poor_scores_before_final_model_factor=1.1 tournament_remove_worse_than_constant_before_final_model=true" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament keep poor scores for small data", + "output": "tournament keep poor scores for small data config.toml: Whether to keep poor scores for small data (<10k rows) in case exploration will find good model. 
sets tournament_remove_poor_scores_before_evolution_model_factor=1.1 tournament_remove_worse_than_constant_before_evolution=false tournament_keep_absolute_ok_scores_before_evolution_model_factor=1.1 tournament_remove_poor_scores_before_final_model_factor=1.1 tournament_remove_worse_than_constant_before_final_model=true" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tournament keep poor scores for small data config.toml: Whether to keep poor scores for small data (<10k rows) in case exploration will find good model. sets tournament_remove_poor_scores_before_evolution_model_factor=1.1 tournament_remove_worse_than_constant_before_evolution=false tournament_keep_absolute_ok_scores_before_evolution_model_factor=1.1 tournament_remove_poor_scores_before_final_model_factor=1.1 tournament_remove_worse_than_constant_before_final_model=true" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tournament_keep_poor_scores_for_small_data", + "output": "tournament keep poor scores for small data config.toml: Whether to keep poor scores for small data (<10k rows) in case exploration will find good model. sets tournament_remove_poor_scores_before_evolution_model_factor=1.1 tournament_remove_worse_than_constant_before_evolution=false tournament_keep_absolute_ok_scores_before_evolution_model_factor=1.1 tournament_remove_poor_scores_before_final_model_factor=1.1 tournament_remove_worse_than_constant_before_final_model=true" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tournament_keep_poor_scores_for_small_data", + "output": "tournament keep poor scores for small data config.toml: Whether to keep poor scores for small data (<10k rows) in case exploration will find good model. 
sets tournament_remove_poor_scores_before_evolution_model_factor=1.1 tournament_remove_worse_than_constant_before_evolution=false tournament_keep_absolute_ok_scores_before_evolution_model_factor=1.1 tournament_remove_poor_scores_before_final_model_factor=1.1 tournament_remove_worse_than_constant_before_final_model=true" + }, + { + "prompt_type": "plain", + "instruction": ": What does tournament_remove_poor_scores_before_evolution_model_factor do? : tournament remove poor scores before evolution model factor config.toml: Factor (compared to best score plus each score) beyond which to drop poorly scoring models before evolution. This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tournament_remove_poor_scores_before_evolution_model_factor. : tournament remove poor scores before evolution model factor config.toml: Factor (compared to best score plus each score) beyond which to drop poorly scoring models before evolution. This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_remove_poor_scores_before_evolution_model_factor", + "output": "tournament remove poor scores before evolution model factor config.toml: Factor (compared to best score plus each score) beyond which to drop poorly scoring models before evolution. This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_remove_poor_scores_before_evolution_model_factor", + "output": "tournament remove poor scores before evolution model factor config.toml: Factor (compared to best score plus each score) beyond which to drop poorly scoring models before evolution. 
This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament remove poor scores before evolution model factor", + "output": "tournament remove poor scores before evolution model factor config.toml: Factor (compared to best score plus each score) beyond which to drop poorly scoring models before evolution. This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tournament remove poor scores before evolution model factor config.toml: Factor (compared to best score plus each score) beyond which to drop poorly scoring models before evolution. This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tournament_remove_poor_scores_before_evolution_model_factor", + "output": "tournament remove poor scores before evolution model factor config.toml: Factor (compared to best score plus each score) beyond which to drop poorly scoring models before evolution. This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tournament_remove_poor_scores_before_evolution_model_factor", + "output": "tournament remove poor scores before evolution model factor config.toml: Factor (compared to best score plus each score) beyond which to drop poorly scoring models before evolution. This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "plain", + "instruction": ": What does tournament_remove_worse_than_constant_before_evolution do? 
: tournament remove worse than constant before evolution config.toml: For before evolution after tuning, whether to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "plain", + "instruction": ": Explain tournament_remove_worse_than_constant_before_evolution. : tournament remove worse than constant before evolution config.toml: For before evolution after tuning, whether to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_remove_worse_than_constant_before_evolution", + "output": "tournament remove worse than constant before evolution config.toml: For before evolution after tuning, whether to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_remove_worse_than_constant_before_evolution", + "output": "tournament remove worse than constant before evolution config.toml: For before evolution after tuning, whether to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament remove worse than constant before evolution", + "output": "tournament remove worse than constant before evolution config.toml: For before evolution after tuning, whether to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tournament remove worse than constant before evolution config.toml: For before evolution after tuning, whether to remove models that are worse than (optimized to 
scorer) constant prediction model" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tournament_remove_worse_than_constant_before_evolution", + "output": "tournament remove worse than constant before evolution config.toml: For before evolution after tuning, whether to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tournament_remove_worse_than_constant_before_evolution", + "output": "tournament remove worse than constant before evolution config.toml: For before evolution after tuning, whether to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "plain", + "instruction": ": What does tournament_keep_absolute_ok_scores_before_evolution_model_factor do? : tournament keep absolute ok scores before evolution model factor config.toml: For before evolution after tuning, where on scale of 0 (perfect) to 1 (constant model) to keep ok scores by absolute value." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tournament_keep_absolute_ok_scores_before_evolution_model_factor. : tournament keep absolute ok scores before evolution model factor config.toml: For before evolution after tuning, where on scale of 0 (perfect) to 1 (constant model) to keep ok scores by absolute value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_keep_absolute_ok_scores_before_evolution_model_factor", + "output": "tournament keep absolute ok scores before evolution model factor config.toml: For before evolution after tuning, where on scale of 0 (perfect) to 1 (constant model) to keep ok scores by absolute value." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_keep_absolute_ok_scores_before_evolution_model_factor", + "output": "tournament keep absolute ok scores before evolution model factor config.toml: For before evolution after tuning, where on scale of 0 (perfect) to 1 (constant model) to keep ok scores by absolute value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament keep absolute ok scores before evolution model factor", + "output": "tournament keep absolute ok scores before evolution model factor config.toml: For before evolution after tuning, where on scale of 0 (perfect) to 1 (constant model) to keep ok scores by absolute value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tournament keep absolute ok scores before evolution model factor config.toml: For before evolution after tuning, where on scale of 0 (perfect) to 1 (constant model) to keep ok scores by absolute value." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tournament_keep_absolute_ok_scores_before_evolution_model_factor", + "output": "tournament keep absolute ok scores before evolution model factor config.toml: For before evolution after tuning, where on scale of 0 (perfect) to 1 (constant model) to keep ok scores by absolute value." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tournament_keep_absolute_ok_scores_before_evolution_model_factor", + "output": "tournament keep absolute ok scores before evolution model factor config.toml: For before evolution after tuning, where on scale of 0 (perfect) to 1 (constant model) to keep ok scores by absolute value." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does tournament_remove_poor_scores_before_final_model_factor do? : tournament remove poor scores before final model factor config.toml: Factor (compared to best score) beyond which to drop poorly scoring models before building final ensemble. This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tournament_remove_poor_scores_before_final_model_factor. : tournament remove poor scores before final model factor config.toml: Factor (compared to best score) beyond which to drop poorly scoring models before building final ensemble. This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_remove_poor_scores_before_final_model_factor", + "output": "tournament remove poor scores before final model factor config.toml: Factor (compared to best score) beyond which to drop poorly scoring models before building final ensemble. This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_remove_poor_scores_before_final_model_factor", + "output": "tournament remove poor scores before final model factor config.toml: Factor (compared to best score) beyond which to drop poorly scoring models before building final ensemble. This is useful in cases when poorly scoring models take a long time to train." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament remove poor scores before final model factor", + "output": "tournament remove poor scores before final model factor config.toml: Factor (compared to best score) beyond which to drop poorly scoring models before building final ensemble. This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tournament remove poor scores before final model factor config.toml: Factor (compared to best score) beyond which to drop poorly scoring models before building final ensemble. This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tournament_remove_poor_scores_before_final_model_factor", + "output": "tournament remove poor scores before final model factor config.toml: Factor (compared to best score) beyond which to drop poorly scoring models before building final ensemble. This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tournament_remove_poor_scores_before_final_model_factor", + "output": "tournament remove poor scores before final model factor config.toml: Factor (compared to best score) beyond which to drop poorly scoring models before building final ensemble. This is useful in cases when poorly scoring models take a long time to train." + }, + { + "prompt_type": "plain", + "instruction": ": What does tournament_remove_worse_than_constant_before_final_model do? 
: tournament remove worse than constant before final model config.toml: For before final model after evolution, whether to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "plain", + "instruction": ": Explain tournament_remove_worse_than_constant_before_final_model. : tournament remove worse than constant before final model config.toml: For before final model after evolution, whether to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_remove_worse_than_constant_before_final_model", + "output": "tournament remove worse than constant before final model config.toml: For before final model after evolution, whether to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament_remove_worse_than_constant_before_final_model", + "output": "tournament remove worse than constant before final model config.toml: For before final model after evolution, whether to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tournament remove worse than constant before final model", + "output": "tournament remove worse than constant before final model config.toml: For before final model after evolution, whether to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tournament remove worse than constant before final model config.toml: For before final model after evolution, whether 
to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tournament_remove_worse_than_constant_before_final_model", + "output": "tournament remove worse than constant before final model config.toml: For before final model after evolution, whether to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tournament_remove_worse_than_constant_before_final_model", + "output": "tournament remove worse than constant before final model config.toml: For before final model after evolution, whether to remove models that are worse than (optimized to scorer) constant prediction model" + }, + { + "prompt_type": "plain", + "instruction": ": What does num_individuals do? : num individuals config.toml: Driverless AI uses a genetic algorithm (GA) to find the best features, best models and best hyper parameters for these models. The GA facilitates getting good results while not requiring torun/try every possible model/feature/parameter. This version of GA has reinforcement learning elements - it uses a form of exploration-exploitation to reach optimum solutions. This means it will capitalise on models/features/parameters that seem # to be working well and continue to exploit them even more, while allowing some room for trying new (and semi-random) models/features/parameters to avoid settling on a local minimum. These models/features/parameters tried are what-we-call individuals of a population. More # individuals connote more models/features/parameters to be tried and compete to find the best # ones." + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_individuals. 
: num individuals config.toml: Driverless AI uses a genetic algorithm (GA) to find the best features, best models and best hyper parameters for these models. The GA facilitates getting good results while not requiring torun/try every possible model/feature/parameter. This version of GA has reinforcement learning elements - it uses a form of exploration-exploitation to reach optimum solutions. This means it will capitalise on models/features/parameters that seem # to be working well and continue to exploit them even more, while allowing some room for trying new (and semi-random) models/features/parameters to avoid settling on a local minimum. These models/features/parameters tried are what-we-call individuals of a population. More # individuals connote more models/features/parameters to be tried and compete to find the best # ones." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_individuals", + "output": "num individuals config.toml: Driverless AI uses a genetic algorithm (GA) to find the best features, best models and best hyper parameters for these models. The GA facilitates getting good results while not requiring torun/try every possible model/feature/parameter. This version of GA has reinforcement learning elements - it uses a form of exploration-exploitation to reach optimum solutions. This means it will capitalise on models/features/parameters that seem # to be working well and continue to exploit them even more, while allowing some room for trying new (and semi-random) models/features/parameters to avoid settling on a local minimum. These models/features/parameters tried are what-we-call individuals of a population. More # individuals connote more models/features/parameters to be tried and compete to find the best # ones." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_individuals", + "output": "num individuals config.toml: Driverless AI uses a genetic algorithm (GA) to find the best features, best models and best hyper parameters for these models. The GA facilitates getting good results while not requiring to run/try every possible model/feature/parameter. This version of GA has reinforcement learning elements - it uses a form of exploration-exploitation to reach optimum solutions. This means it will capitalise on models/features/parameters that seem to be working well and continue to exploit them even more, while allowing some room for trying new (and semi-random) models/features/parameters to avoid settling on a local minimum. These models/features/parameters tried are what-we-call individuals of a population. More individuals connote more models/features/parameters to be tried and compete to find the best ones." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num individuals", + "output": "num individuals config.toml: Driverless AI uses a genetic algorithm (GA) to find the best features, best models and best hyper parameters for these models. The GA facilitates getting good results while not requiring to run/try every possible model/feature/parameter. This version of GA has reinforcement learning elements - it uses a form of exploration-exploitation to reach optimum solutions. This means it will capitalise on models/features/parameters that seem to be working well and continue to exploit them even more, while allowing some room for trying new (and semi-random) models/features/parameters to avoid settling on a local minimum. These models/features/parameters tried are what-we-call individuals of a population. More individuals connote more models/features/parameters to be tried and compete to find the best ones." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "num individuals config.toml: Driverless AI uses a genetic algorithm (GA) to find the best features, best models and best hyper parameters for these models. The GA facilitates getting good results while not requiring to run/try every possible model/feature/parameter. This version of GA has reinforcement learning elements - it uses a form of exploration-exploitation to reach optimum solutions. This means it will capitalise on models/features/parameters that seem to be working well and continue to exploit them even more, while allowing some room for trying new (and semi-random) models/features/parameters to avoid settling on a local minimum. These models/features/parameters tried are what-we-call individuals of a population. More individuals connote more models/features/parameters to be tried and compete to find the best ones." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_individuals", + "output": "num individuals config.toml: Driverless AI uses a genetic algorithm (GA) to find the best features, best models and best hyper parameters for these models. The GA facilitates getting good results while not requiring to run/try every possible model/feature/parameter. This version of GA has reinforcement learning elements - it uses a form of exploration-exploitation to reach optimum solutions. This means it will capitalise on models/features/parameters that seem to be working well and continue to exploit them even more, while allowing some room for trying new (and semi-random) models/features/parameters to avoid settling on a local minimum. These models/features/parameters tried are what-we-call individuals of a population. More individuals connote more models/features/parameters to be tried and compete to find the best ones." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_individuals", + "output": "num individuals config.toml: Driverless AI uses a genetic algorithm (GA) to find the best features, best models and best hyper parameters for these models. The GA facilitates getting good results while not requiring to run/try every possible model/feature/parameter. This version of GA has reinforcement learning elements - it uses a form of exploration-exploitation to reach optimum solutions. This means it will capitalise on models/features/parameters that seem to be working well and continue to exploit them even more, while allowing some room for trying new (and semi-random) models/features/parameters to avoid settling on a local minimum. These models/features/parameters tried are what-we-call individuals of a population. More individuals connote more models/features/parameters to be tried and compete to find the best ones." + }, + { + "prompt_type": "plain", + "instruction": ": What does fixed_num_individuals do? : fixed num individuals config.toml: set fixed number of individuals (if > 0) - useful to compare different hardware configurations. If want 3 individuals in GA race to be preserved, choose 6, since need 1 mutatable loser per surviving individual." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fixed_num_individuals. : fixed num individuals config.toml: set fixed number of individuals (if > 0) - useful to compare different hardware configurations. If want 3 individuals in GA race to be preserved, choose 6, since need 1 mutatable loser per surviving individual." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_num_individuals", + "output": "fixed num individuals config.toml: set fixed number of individuals (if > 0) - useful to compare different hardware configurations. 
If want 3 individuals in GA race to be preserved, choose 6, since need 1 mutatable loser per surviving individual." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_num_individuals", + "output": "fixed num individuals config.toml: set fixed number of individuals (if > 0) - useful to compare different hardware configurations. If want 3 individuals in GA race to be preserved, choose 6, since need 1 mutatable loser per surviving individual." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed num individuals", + "output": "fixed num individuals config.toml: set fixed number of individuals (if > 0) - useful to compare different hardware configurations. If want 3 individuals in GA race to be preserved, choose 6, since need 1 mutatable loser per surviving individual." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "fixed num individuals config.toml: set fixed number of individuals (if > 0) - useful to compare different hardware configurations. If want 3 individuals in GA race to be preserved, choose 6, since need 1 mutatable loser per surviving individual." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fixed_num_individuals", + "output": "fixed num individuals config.toml: set fixed number of individuals (if > 0) - useful to compare different hardware configurations. If want 3 individuals in GA race to be preserved, choose 6, since need 1 mutatable loser per surviving individual." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fixed_num_individuals", + "output": "fixed num individuals config.toml: set fixed number of individuals (if > 0) - useful to compare different hardware configurations. 
If want 3 individuals in GA race to be preserved, choose 6, since need 1 mutatable loser per surviving individual." + }, + { + "prompt_type": "plain", + "instruction": ": What does sanitize_natural_sort_limit do? : sanitize natural sort limit config.toml: number of unique targets or folds counts after which switch to faster/simpler non-natural sorting and print outs" + }, + { + "prompt_type": "plain", + "instruction": ": Explain sanitize_natural_sort_limit. : sanitize natural sort limit config.toml: number of unique targets or folds counts after which switch to faster/simpler non-natural sorting and print outs" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "sanitize_natural_sort_limit", + "output": "sanitize natural sort limit config.toml: number of unique targets or folds counts after which switch to faster/simpler non-natural sorting and print outs" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "sanitize_natural_sort_limit", + "output": "sanitize natural sort limit config.toml: number of unique targets or folds counts after which switch to faster/simpler non-natural sorting and print outs" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "sanitize natural sort limit", + "output": "sanitize natural sort limit config.toml: number of unique targets or folds counts after which switch to faster/simpler non-natural sorting and print outs" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "sanitize natural sort limit config.toml: number of unique targets or folds counts after which switch to faster/simpler non-natural sorting and print outs" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting 
sanitize_natural_sort_limit", + "output": "sanitize natural sort limit config.toml: number of unique targets or folds counts after which switch to faster/simpler non-natural sorting and print outs" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting sanitize_natural_sort_limit", + "output": "sanitize natural sort limit config.toml: number of unique targets or folds counts after which switch to faster/simpler non-natural sorting and print outs" + }, + { + "prompt_type": "plain", + "instruction": ": What does head_tail_fold_id_report_length do? : head tail fold id report length config.toml: number of fold ids to report cardinality for, both most common (head) and least common (tail)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain head_tail_fold_id_report_length. : head tail fold id report length config.toml: number of fold ids to report cardinality for, both most common (head) and least common (tail)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "head_tail_fold_id_report_length", + "output": "head tail fold id report length config.toml: number of fold ids to report cardinality for, both most common (head) and least common (tail)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "head_tail_fold_id_report_length", + "output": "head tail fold id report length config.toml: number of fold ids to report cardinality for, both most common (head) and least common (tail)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "head tail fold id report length", + "output": "head tail fold id report length config.toml: number of fold ids to report cardinality for, both most common (head) and least common (tail)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the 
following expert setting for Driverless AI", + "input": "", + "output": "head tail fold id report length config.toml: number of fold ids to report cardinality for, both most common (head) and least common (tail)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting head_tail_fold_id_report_length", + "output": "head tail fold id report length config.toml: number of fold ids to report cardinality for, both most common (head) and least common (tail)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting head_tail_fold_id_report_length", + "output": "head tail fold id report length config.toml: number of fold ids to report cardinality for, both most common (head) and least common (tail)" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_target_encoding do? : enable target encoding config.toml: Whether target encoding (CV target encoding, weight of evidence, etc.) could be enabledTarget encoding refers to several different feature transformations (primarily focused oncategorical data) that aim to represent the feature using information of the actualtarget variable. A simple example can be to use the mean of the target to replace eachunique category of a categorical feature. This type of features can be very predictive,but are prone to overfitting and require more memory as they need to store mappings ofthe unique categories and the target values. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_target_encoding. : enable target encoding config.toml: Whether target encoding (CV target encoding, weight of evidence, etc.) could be enabledTarget encoding refers to several different feature transformations (primarily focused oncategorical data) that aim to represent the feature using information of the actualtarget variable. 
A simple example can be to use the mean of the target to replace eachunique category of a categorical feature. This type of features can be very predictive,but are prone to overfitting and require more memory as they need to store mappings ofthe unique categories and the target values. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable Target Encoding (auto disables for time series): . : Set the enable target encoding config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_target_encoding", + "output": "enable target encoding config.toml: Whether target encoding (CV target encoding, weight of evidence, etc.) could be enabledTarget encoding refers to several different feature transformations (primarily focused oncategorical data) that aim to represent the feature using information of the actualtarget variable. A simple example can be to use the mean of the target to replace eachunique category of a categorical feature. This type of features can be very predictive,but are prone to overfitting and require more memory as they need to store mappings ofthe unique categories and the target values. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_target_encoding", + "output": "enable target encoding config.toml: Enable Target Encoding (auto disables for time series): Whether target encoding (CV target encoding, weight of evidence, etc.) could be enabledTarget encoding refers to several different feature transformations (primarily focused oncategorical data) that aim to represent the feature using information of the actualtarget variable. A simple example can be to use the mean of the target to replace eachunique category of a categorical feature. 
This type of features can be very predictive,but are prone to overfitting and require more memory as they need to store mappings ofthe unique categories and the target values. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable target encoding", + "output": "enable target encoding config.toml: Enable Target Encoding (auto disables for time series): Whether target encoding (CV target encoding, weight of evidence, etc.) could be enabledTarget encoding refers to several different feature transformations (primarily focused oncategorical data) that aim to represent the feature using information of the actualtarget variable. A simple example can be to use the mean of the target to replace eachunique category of a categorical feature. This type of features can be very predictive,but are prone to overfitting and require more memory as they need to store mappings ofthe unique categories and the target values. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable Target Encoding (auto disables for time series): ", + "output": "enable target encoding config.toml: Enable Target Encoding (auto disables for time series): Whether target encoding (CV target encoding, weight of evidence, etc.) could be enabledTarget encoding refers to several different feature transformations (primarily focused oncategorical data) that aim to represent the feature using information of the actualtarget variable. A simple example can be to use the mean of the target to replace eachunique category of a categorical feature. This type of features can be very predictive,but are prone to overfitting and require more memory as they need to store mappings ofthe unique categories and the target values. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_target_encoding", + "output": "enable target encoding config.toml: Whether target encoding (CV target encoding, weight of evidence, etc.) could be enabledTarget encoding refers to several different feature transformations (primarily focused oncategorical data) that aim to represent the feature using information of the actualtarget variable. A simple example can be to use the mean of the target to replace eachunique category of a categorical feature. This type of features can be very predictive,but are prone to overfitting and require more memory as they need to store mappings ofthe unique categories and the target values. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_target_encoding", + "output": "enable target encoding config.toml: Enable Target Encoding (auto disables for time series): Whether target encoding (CV target encoding, weight of evidence, etc.) could be enabledTarget encoding refers to several different feature transformations (primarily focused oncategorical data) that aim to represent the feature using information of the actualtarget variable. A simple example can be to use the mean of the target to replace eachunique category of a categorical feature. This type of features can be very predictive,but are prone to overfitting and require more memory as they need to store mappings ofthe unique categories and the target values. " + }, + { + "prompt_type": "plain", + "instruction": ": What does cvte_cv_in_cv_use_model do? : cvte cv in cv use model config.toml: For target encoding, whether a model is used to compute Ginis for checking sanity of transformer. Requires cvte_cv_in_cv to be enabled. If enabled, CV-in-CV isn't done in case the check fails." + }, + { + "prompt_type": "plain", + "instruction": ": Explain cvte_cv_in_cv_use_model. 
: cvte cv in cv use model config.toml: For target encoding, whether a model is used to compute Ginis for checking sanity of transformer. Requires cvte_cv_in_cv to be enabled. If enabled, CV-in-CV isn't done in case the check fails." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cvte_cv_in_cv_use_model", + "output": "cvte cv in cv use model config.toml: For target encoding, whether a model is used to compute Ginis for checking sanity of transformer. Requires cvte_cv_in_cv to be enabled. If enabled, CV-in-CV isn't done in case the check fails." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cvte_cv_in_cv_use_model", + "output": "cvte cv in cv use model config.toml: For target encoding, whether a model is used to compute Ginis for checking sanity of transformer. Requires cvte_cv_in_cv to be enabled. If enabled, CV-in-CV isn't done in case the check fails." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cvte cv in cv use model", + "output": "cvte cv in cv use model config.toml: For target encoding, whether a model is used to compute Ginis for checking sanity of transformer. Requires cvte_cv_in_cv to be enabled. If enabled, CV-in-CV isn't done in case the check fails." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "cvte cv in cv use model config.toml: For target encoding, whether a model is used to compute Ginis for checking sanity of transformer. Requires cvte_cv_in_cv to be enabled. If enabled, CV-in-CV isn't done in case the check fails." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting cvte_cv_in_cv_use_model", + "output": "cvte cv in cv use model config.toml: For target encoding, whether a model is used to compute Ginis for checking sanity of transformer. Requires cvte_cv_in_cv to be enabled. If enabled, CV-in-CV isn't done in case the check fails." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting cvte_cv_in_cv_use_model", + "output": "cvte cv in cv use model config.toml: For target encoding, whether a model is used to compute Ginis for checking sanity of transformer. Requires cvte_cv_in_cv to be enabled. If enabled, CV-in-CV isn't done in case the check fails." + }, + { + "prompt_type": "plain", + "instruction": ": What does cvte_cv_in_cv do? : cvte cv in cv config.toml: For target encoding,whether an outer level of cross-fold validation is performed,in cases when GINI is detected to flip sign (or have inconsistent sign for weight of evidence)between fit_transform on training, transform on training, and transform on validation data.The degree to which GINI is poor is also used to perform fold-averaging of look-up tables insteadof using global look-up tables. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain cvte_cv_in_cv. : cvte cv in cv config.toml: For target encoding,whether an outer level of cross-fold validation is performed,in cases when GINI is detected to flip sign (or have inconsistent sign for weight of evidence)between fit_transform on training, transform on training, and transform on validation data.The degree to which GINI is poor is also used to perform fold-averaging of look-up tables insteadof using global look-up tables. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable outer CV for Target Encoding: . 
: Set the cvte cv in cv config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cvte_cv_in_cv", + "output": "cvte cv in cv config.toml: For target encoding,whether an outer level of cross-fold validation is performed,in cases when GINI is detected to flip sign (or have inconsistent sign for weight of evidence)between fit_transform on training, transform on training, and transform on validation data.The degree to which GINI is poor is also used to perform fold-averaging of look-up tables insteadof using global look-up tables. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cvte_cv_in_cv", + "output": "cvte cv in cv config.toml: Enable outer CV for Target Encoding: For target encoding,whether an outer level of cross-fold validation is performed,in cases when GINI is detected to flip sign (or have inconsistent sign for weight of evidence)between fit_transform on training, transform on training, and transform on validation data.The degree to which GINI is poor is also used to perform fold-averaging of look-up tables insteadof using global look-up tables. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cvte cv in cv", + "output": "cvte cv in cv config.toml: Enable outer CV for Target Encoding: For target encoding,whether an outer level of cross-fold validation is performed,in cases when GINI is detected to flip sign (or have inconsistent sign for weight of evidence)between fit_transform on training, transform on training, and transform on validation data.The degree to which GINI is poor is also used to perform fold-averaging of look-up tables insteadof using global look-up tables. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable outer CV for Target Encoding: ", + "output": "cvte cv in cv config.toml: Enable outer CV for Target Encoding: For target encoding,whether an outer level of cross-fold validation is performed,in cases when GINI is detected to flip sign (or have inconsistent sign for weight of evidence)between fit_transform on training, transform on training, and transform on validation data.The degree to which GINI is poor is also used to perform fold-averaging of look-up tables insteadof using global look-up tables. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting cvte_cv_in_cv", + "output": "cvte cv in cv config.toml: For target encoding,whether an outer level of cross-fold validation is performed,in cases when GINI is detected to flip sign (or have inconsistent sign for weight of evidence)between fit_transform on training, transform on training, and transform on validation data.The degree to which GINI is poor is also used to perform fold-averaging of look-up tables insteadof using global look-up tables. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting cvte_cv_in_cv", + "output": "cvte cv in cv config.toml: Enable outer CV for Target Encoding: For target encoding,whether an outer level of cross-fold validation is performed,in cases when GINI is detected to flip sign (or have inconsistent sign for weight of evidence)between fit_transform on training, transform on training, and transform on validation data.The degree to which GINI is poor is also used to perform fold-averaging of look-up tables insteadof using global look-up tables. " + }, + { + "prompt_type": "plain", + "instruction": ": What does cv_in_cv_overconfidence_protection do? 
: cv in cv overconfidence protection config.toml: For target encoding, when an outer level of cross-fold validation is performed, increase number of outer folds or abort target encoding when GINI between feature and target are not close between fit_transform on training, transform on training, and transform on validation data. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain cv_in_cv_overconfidence_protection. : cv in cv overconfidence protection config.toml: For target encoding, when an outer level of cross-fold validation is performed, increase number of outer folds or abort target encoding when GINI between feature and target are not close between fit_transform on training, transform on training, and transform on validation data. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable outer CV for Target Encoding with overconfidence protection: . : Set the cv in cv overconfidence protection config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cv_in_cv_overconfidence_protection", + "output": "cv in cv overconfidence protection config.toml: For target encoding, when an outer level of cross-fold validation is performed, increase number of outer folds or abort target encoding when GINI between feature and target are not close between fit_transform on training, transform on training, and transform on validation data. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cv_in_cv_overconfidence_protection", + "output": "cv in cv overconfidence protection config.toml: Enable outer CV for Target Encoding with overconfidence protection: For target encoding,when an outer level of cross-fold validation is performed,increase number of outer folds or abort target encoding when GINI between feature and targetare not close between fit_transform on training, transform on training, and transform on validation data. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cv in cv overconfidence protection", + "output": "cv in cv overconfidence protection config.toml: Enable outer CV for Target Encoding with overconfidence protection: For target encoding,when an outer level of cross-fold validation is performed,increase number of outer folds or abort target encoding when GINI between feature and targetare not close between fit_transform on training, transform on training, and transform on validation data. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable outer CV for Target Encoding with overconfidence protection: ", + "output": "cv in cv overconfidence protection config.toml: Enable outer CV for Target Encoding with overconfidence protection: For target encoding,when an outer level of cross-fold validation is performed,increase number of outer folds or abort target encoding when GINI between feature and targetare not close between fit_transform on training, transform on training, and transform on validation data. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting cv_in_cv_overconfidence_protection", + "output": "cv in cv overconfidence protection config.toml: For target encoding,when an outer level of cross-fold validation is performed,increase number of outer folds or abort target encoding when GINI between feature and targetare not close between fit_transform on training, transform on training, and transform on validation data. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting cv_in_cv_overconfidence_protection", + "output": "cv in cv overconfidence protection config.toml: Enable outer CV for Target Encoding with overconfidence protection: For target encoding,when an outer level of cross-fold validation is performed,increase number of outer folds or abort target encoding when GINI between feature and targetare not close between fit_transform on training, transform on training, and transform on validation data. " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_lexilabel_encoding do? : enable lexilabel encoding config.toml: Enable Lexicographical Label Encoding: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_lexilabel_encoding. 
: enable lexilabel encoding config.toml: Enable Lexicographical Label Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lexilabel_encoding", + "output": "enable lexilabel encoding config.toml: Enable Lexicographical Label Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lexilabel_encoding", + "output": "enable lexilabel encoding config.toml: Enable Lexicographical Label Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable lexilabel encoding", + "output": "enable lexilabel encoding config.toml: Enable Lexicographical Label Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable Lexicographical Label Encoding: ", + "output": "enable lexilabel encoding config.toml: Enable Lexicographical Label Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_lexilabel_encoding", + "output": "enable lexilabel encoding config.toml: Enable Lexicographical Label Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_lexilabel_encoding", + "output": "enable lexilabel encoding config.toml: Enable Lexicographical Label Encoding: " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_isolation_forest do? : enable isolation forest config.toml: Enable Isolation Forest Anomaly Score Encoding: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_isolation_forest. 
: enable isolation forest config.toml: Enable Isolation Forest Anomaly Score Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_isolation_forest", + "output": "enable isolation forest config.toml: Enable Isolation Forest Anomaly Score Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_isolation_forest", + "output": "enable isolation forest config.toml: Enable Isolation Forest Anomaly Score Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable isolation forest", + "output": "enable isolation forest config.toml: Enable Isolation Forest Anomaly Score Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable Isolation Forest Anomaly Score Encoding: ", + "output": "enable isolation forest config.toml: Enable Isolation Forest Anomaly Score Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_isolation_forest", + "output": "enable isolation forest config.toml: Enable Isolation Forest Anomaly Score Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_isolation_forest", + "output": "enable isolation forest config.toml: Enable Isolation Forest Anomaly Score Encoding: " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_one_hot_encoding do? : enable one hot encoding config.toml: Whether one hot encoding could be enabled. If auto, then only applied for small data and GLM." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_one_hot_encoding. : enable one hot encoding config.toml: Whether one hot encoding could be enabled. 
If auto, then only applied for small data and GLM." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable One HotEncoding (auto enables only for GLM): . : Set the enable one hot encoding config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_one_hot_encoding", + "output": "enable one hot encoding config.toml: Whether one hot encoding could be enabled. If auto, then only applied for small data and GLM." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_one_hot_encoding", + "output": "enable one hot encoding config.toml: Enable One HotEncoding (auto enables only for GLM): Whether one hot encoding could be enabled. If auto, then only applied for small data and GLM." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable one hot encoding", + "output": "enable one hot encoding config.toml: Enable One HotEncoding (auto enables only for GLM): Whether one hot encoding could be enabled. If auto, then only applied for small data and GLM." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable One HotEncoding (auto enables only for GLM): ", + "output": "enable one hot encoding config.toml: Enable One HotEncoding (auto enables only for GLM): Whether one hot encoding could be enabled. If auto, then only applied for small data and GLM." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_one_hot_encoding", + "output": "enable one hot encoding config.toml: Whether one hot encoding could be enabled. If auto, then only applied for small data and GLM." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_one_hot_encoding", + "output": "enable one hot encoding config.toml: Enable One HotEncoding (auto enables only for GLM): Whether one hot encoding could be enabled. If auto, then only applied for small data and GLM." + }, + { + "prompt_type": "plain", + "instruction": ": What does binner_cardinality_limiter do? : binner cardinality limiter config.toml: Limit number of output features (total number of bins) created by all BinnerTransformers based on this value, scaled by accuracy, interpretability and dataset size. 0 means unlimited." + }, + { + "prompt_type": "plain", + "instruction": ": Explain binner_cardinality_limiter. : binner cardinality limiter config.toml: Limit number of output features (total number of bins) created by all BinnerTransformers based on this value, scaled by accuracy, interpretability and dataset size. 0 means unlimited." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner_cardinality_limiter", + "output": "binner cardinality limiter config.toml: Limit number of output features (total number of bins) created by all BinnerTransformers based on this value, scaled by accuracy, interpretability and dataset size. 0 means unlimited." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner_cardinality_limiter", + "output": "binner cardinality limiter config.toml: Limit number of output features (total number of bins) created by all BinnerTransformers based on this value, scaled by accuracy, interpretability and dataset size. 0 means unlimited." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner cardinality limiter", + "output": "binner cardinality limiter config.toml: Limit number of output features (total number of bins) created by all BinnerTransformers based on this value, scaled by accuracy, interpretability and dataset size. 0 means unlimited." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "binner cardinality limiter config.toml: Limit number of output features (total number of bins) created by all BinnerTransformers based on this value, scaled by accuracy, interpretability and dataset size. 0 means unlimited." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting binner_cardinality_limiter", + "output": "binner cardinality limiter config.toml: Limit number of output features (total number of bins) created by all BinnerTransformers based on this value, scaled by accuracy, interpretability and dataset size. 0 means unlimited." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting binner_cardinality_limiter", + "output": "binner cardinality limiter config.toml: Limit number of output features (total number of bins) created by all BinnerTransformers based on this value, scaled by accuracy, interpretability and dataset size. 0 means unlimited." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_binning do? : enable binning config.toml: Whether simple binning of numeric features should be enabled by default. If auto, then only for GLM/FTRL/TensorFlow/GrowNet for time-series or for interpretability >= 6. Binning can help linear (or simple) models by exposing more signal for features that are not linearly correlated with the target. 
Note that NumCatTransformer and NumToCatTransformer already do binning, but also perform target encoding, which makes them less interpretable. The BinnerTransformer is more interpretable, and also works for time series." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_binning. : enable binning config.toml: Whether simple binning of numeric features should be enabled by default. If auto, then only for GLM/FTRL/TensorFlow/GrowNet for time-series or for interpretability >= 6. Binning can help linear (or simple) models by exposing more signal for features that are not linearly correlated with the target. Note that NumCatTransformer and NumToCatTransformer already do binning, but also perform target encoding, which makes them less interpretable. The BinnerTransformer is more interpretable, and also works for time series." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable BinnerTransformer for simple numeric binning (auto enables only for GLM/FTRL/TensorFlow/GrowNet): . : Set the enable binning config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_binning", + "output": "enable binning config.toml: Whether simple binning of numeric features should be enabled by default. If auto, then only for GLM/FTRL/TensorFlow/GrowNet for time-series or for interpretability >= 6. Binning can help linear (or simple) models by exposing more signal for features that are not linearly correlated with the target. Note that NumCatTransformer and NumToCatTransformer already do binning, but also perform target encoding, which makes them less interpretable. The BinnerTransformer is more interpretable, and also works for time series." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_binning", + "output": "enable binning config.toml: Enable BinnerTransformer for simple numeric binning (auto enables only for GLM/FTRL/TensorFlow/GrowNet): Whether simple binning of numeric features should be enabled by default. If auto, then only for GLM/FTRL/TensorFlow/GrowNet for time-series or for interpretability >= 6. Binning can help linear (or simple) models by exposing more signal for features that are not linearly correlated with the target. Note that NumCatTransformer and NumToCatTransformer already do binning, but also perform target encoding, which makes them less interpretable. The BinnerTransformer is more interpretable, and also works for time series." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable binning", + "output": "enable binning config.toml: Enable BinnerTransformer for simple numeric binning (auto enables only for GLM/FTRL/TensorFlow/GrowNet): Whether simple binning of numeric features should be enabled by default. If auto, then only for GLM/FTRL/TensorFlow/GrowNet for time-series or for interpretability >= 6. Binning can help linear (or simple) models by exposing more signal for features that are not linearly correlated with the target. Note that NumCatTransformer and NumToCatTransformer already do binning, but also perform target encoding, which makes them less interpretable. The BinnerTransformer is more interpretable, and also works for time series." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable BinnerTransformer for simple numeric binning (auto enables only for GLM/FTRL/TensorFlow/GrowNet): ", + "output": "enable binning config.toml: Enable BinnerTransformer for simple numeric binning (auto enables only for GLM/FTRL/TensorFlow/GrowNet): Whether simple binning of numeric features should be enabled by default. If auto, then only for GLM/FTRL/TensorFlow/GrowNet for time-series or for interpretability >= 6. Binning can help linear (or simple) models by exposing more signal for features that are not linearly correlated with the target. Note that NumCatTransformer and NumToCatTransformer already do binning, but also perform target encoding, which makes them less interpretable. The BinnerTransformer is more interpretable, and also works for time series." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_binning", + "output": "enable binning config.toml: Whether simple binning of numeric features should be enabled by default. If auto, then only for GLM/FTRL/TensorFlow/GrowNet for time-series or for interpretability >= 6. Binning can help linear (or simple) models by exposing more signal for features that are not linearly correlated with the target. Note that NumCatTransformer and NumToCatTransformer already do binning, but also perform target encoding, which makes them less interpretable. The BinnerTransformer is more interpretable, and also works for time series." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_binning", + "output": "enable binning config.toml: Enable BinnerTransformer for simple numeric binning (auto enables only for GLM/FTRL/TensorFlow/GrowNet): Whether simple binning of numeric features should be enabled by default. 
If auto, then only for GLM/FTRL/TensorFlow/GrowNet for time-series or for interpretability >= 6. Binning can help linear (or simple) models by exposing more signal for features that are not linearly correlated with the target. Note that NumCatTransformer and NumToCatTransformer already do binning, but also perform target encoding, which makes them less interpretable. The BinnerTransformer is more interpretable, and also works for time series." + }, + { + "prompt_type": "plain", + "instruction": ": What does binner_bin_method do? : binner bin method config.toml: Tree uses XGBoost to find optimal split points for binning of numeric features. Quantile use quantile-based binning. Might fall back to quantile-based if too many classes or not enough unique values." + }, + { + "prompt_type": "plain", + "instruction": ": Explain binner_bin_method. : binner bin method config.toml: Tree uses XGBoost to find optimal split points for binning of numeric features. Quantile use quantile-based binning. Might fall back to quantile-based if too many classes or not enough unique values." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Select methods used to find bins for Binner Transformer: . : Set the binner bin method config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner_bin_method", + "output": "binner bin method config.toml: Tree uses XGBoost to find optimal split points for binning of numeric features. Quantile use quantile-based binning. Might fall back to quantile-based if too many classes or not enough unique values." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner_bin_method", + "output": "binner bin method config.toml: Select methods used to find bins for Binner Transformer: Tree uses XGBoost to find optimal split points for binning of numeric features. 
Quantile use quantile-based binning. Might fall back to quantile-based if too many classes or not enough unique values." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner bin method", + "output": "binner bin method config.toml: Select methods used to find bins for Binner Transformer: Tree uses XGBoost to find optimal split points for binning of numeric features. Quantile use quantile-based binning. Might fall back to quantile-based if too many classes or not enough unique values." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Select methods used to find bins for Binner Transformer: ", + "output": "binner bin method config.toml: Select methods used to find bins for Binner Transformer: Tree uses XGBoost to find optimal split points for binning of numeric features. Quantile use quantile-based binning. Might fall back to quantile-based if too many classes or not enough unique values." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting binner_bin_method", + "output": "binner bin method config.toml: Tree uses XGBoost to find optimal split points for binning of numeric features. Quantile use quantile-based binning. Might fall back to quantile-based if too many classes or not enough unique values." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting binner_bin_method", + "output": "binner bin method config.toml: Select methods used to find bins for Binner Transformer: Tree uses XGBoost to find optimal split points for binning of numeric features. Quantile use quantile-based binning. Might fall back to quantile-based if too many classes or not enough unique values." + }, + { + "prompt_type": "plain", + "instruction": ": What does binner_minimize_bins do? 
: binner minimize bins config.toml: If enabled, will attempt to reduce the number of bins during binning of numeric features. Applies to both tree-based and quantile-based bins." + }, + { + "prompt_type": "plain", + "instruction": ": Explain binner_minimize_bins. : binner minimize bins config.toml: If enabled, will attempt to reduce the number of bins during binning of numeric features. Applies to both tree-based and quantile-based bins." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable automatic reduction of number of bins for Binner Transformer: . : Set the binner minimize bins config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner_minimize_bins", + "output": "binner minimize bins config.toml: If enabled, will attempt to reduce the number of bins during binning of numeric features. Applies to both tree-based and quantile-based bins." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner_minimize_bins", + "output": "binner minimize bins config.toml: Enable automatic reduction of number of bins for Binner Transformer: If enabled, will attempt to reduce the number of bins during binning of numeric features. Applies to both tree-based and quantile-based bins." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner minimize bins", + "output": "binner minimize bins config.toml: Enable automatic reduction of number of bins for Binner Transformer: If enabled, will attempt to reduce the number of bins during binning of numeric features. Applies to both tree-based and quantile-based bins." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable automatic reduction of number of bins for Binner Transformer: ", + "output": "binner minimize bins config.toml: Enable automatic reduction of number of bins for Binner Transformer: If enabled, will attempt to reduce the number of bins during binning of numeric features. Applies to both tree-based and quantile-based bins." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting binner_minimize_bins", + "output": "binner minimize bins config.toml: If enabled, will attempt to reduce the number of bins during binning of numeric features. Applies to both tree-based and quantile-based bins." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting binner_minimize_bins", + "output": "binner minimize bins config.toml: Enable automatic reduction of number of bins for Binner Transformer: If enabled, will attempt to reduce the number of bins during binning of numeric features. Applies to both tree-based and quantile-based bins." + }, + { + "prompt_type": "plain", + "instruction": ": What does binner_encoding do? : binner encoding config.toml: Given a set of bins (cut points along min...max), the encoding scheme converts the original numeric feature values into the values of the output columns (one column per bin, and one extra bin for missing values if any). Piecewise linear is 0 left of the bin, and 1 right of the bin, and grows linearly from 0 to 1 inside the bin. Binary is 1 inside the bin and 0 outside the bin. Missing value bin encoding is always binary, either 0 or 1. If no missing values in the data, then there is no missing value bin. Piecewise linear helps to encode growing values and keeps smooth transitions across the bin boundaries, while binary is best suited for detecting specific values in the data. 
Both are great at providing features to models that otherwise lack non-linear pattern detection." + }, + { + "prompt_type": "plain", + "instruction": ": Explain binner_encoding. : binner encoding config.toml: Given a set of bins (cut points along min...max), the encoding scheme converts the original numeric feature values into the values of the output columns (one column per bin, and one extra bin for missing values if any). Piecewise linear is 0 left of the bin, and 1 right of the bin, and grows linearly from 0 to 1 inside the bin. Binary is 1 inside the bin and 0 outside the bin. Missing value bin encoding is always binary, either 0 or 1. If no missing values in the data, then there is no missing value bin. Piecewise linear helps to encode growing values and keeps smooth transitions across the bin boundaries, while binary is best suited for detecting specific values in the data. Both are great at providing features to models that otherwise lack non-linear pattern detection." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Select encoding schemes for Binner Transformer: . : Set the binner encoding config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner_encoding", + "output": "binner encoding config.toml: Given a set of bins (cut points along min...max), the encoding scheme converts the original numeric feature values into the values of the output columns (one column per bin, and one extra bin for missing values if any). Piecewise linear is 0 left of the bin, and 1 right of the bin, and grows linearly from 0 to 1 inside the bin. Binary is 1 inside the bin and 0 outside the bin. Missing value bin encoding is always binary, either 0 or 1. If no missing values in the data, then there is no missing value bin. 
Piecewise linear helps to encode growing values and keeps smooth transitions across the bin boundaries, while binary is best suited for detecting specific values in the data. Both are great at providing features to models that otherwise lack non-linear pattern detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner_encoding", + "output": "binner encoding config.toml: Select encoding schemes for Binner Transformer: Given a set of bins (cut points along min...max), the encoding scheme converts the original numeric feature values into the values of the output columns (one column per bin, and one extra bin for missing values if any). Piecewise linear is 0 left of the bin, and 1 right of the bin, and grows linearly from 0 to 1 inside the bin. Binary is 1 inside the bin and 0 outside the bin. Missing value bin encoding is always binary, either 0 or 1. If no missing values in the data, then there is no missing value bin. Piecewise linear helps to encode growing values and keeps smooth transitions across the bin boundaries, while binary is best suited for detecting specific values in the data. Both are great at providing features to models that otherwise lack non-linear pattern detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner encoding", + "output": "binner encoding config.toml: Select encoding schemes for Binner Transformer: Given a set of bins (cut points along min...max), the encoding scheme converts the original numeric feature values into the values of the output columns (one column per bin, and one extra bin for missing values if any). Piecewise linear is 0 left of the bin, and 1 right of the bin, and grows linearly from 0 to 1 inside the bin. Binary is 1 inside the bin and 0 outside the bin. Missing value bin encoding is always binary, either 0 or 1. 
If no missing values in the data, then there is no missing value bin. Piecewise linear helps to encode growing values and keeps smooth transitions across the bin boundaries, while binary is best suited for detecting specific values in the data. Both are great at providing features to models that otherwise lack non-linear pattern detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Select encoding schemes for Binner Transformer: ", + "output": "binner encoding config.toml: Select encoding schemes for Binner Transformer: Given a set of bins (cut points along min...max), the encoding scheme converts the original numeric feature values into the values of the output columns (one column per bin, and one extra bin for missing values if any). Piecewise linear is 0 left of the bin, and 1 right of the bin, and grows linearly from 0 to 1 inside the bin. Binary is 1 inside the bin and 0 outside the bin. Missing value bin encoding is always binary, either 0 or 1. If no missing values in the data, then there is no missing value bin. Piecewise linear helps to encode growing values and keeps smooth transitions across the bin boundaries, while binary is best suited for detecting specific values in the data. Both are great at providing features to models that otherwise lack non-linear pattern detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting binner_encoding", + "output": "binner encoding config.toml: Given a set of bins (cut points along min...max), the encoding scheme converts the original numeric feature values into the values of the output columns (one column per bin, and one extra bin for missing values if any). Piecewise linear is 0 left of the bin, and 1 right of the bin, and grows linearly from 0 to 1 inside the bin. Binary is 1 inside the bin and 0 outside the bin. Missing value bin encoding is always binary, either 0 or 1. 
If no missing values in the data, then there is no missing value bin. Piecewise linear helps to encode growing values and keeps smooth transitions across the bin boundaries, while binary is best suited for detecting specific values in the data. Both are great at providing features to models that otherwise lack non-linear pattern detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting binner_encoding", + "output": "binner encoding config.toml: Select encoding schemes for Binner Transformer: Given a set of bins (cut points along min...max), the encoding scheme converts the original numeric feature values into the values of the output columns (one column per bin, and one extra bin for missing values if any). Piecewise linear is 0 left of the bin, and 1 right of the bin, and grows linearly from 0 to 1 inside the bin. Binary is 1 inside the bin and 0 outside the bin. Missing value bin encoding is always binary, either 0 or 1. If no missing values in the data, then there is no missing value bin. Piecewise linear helps to encode growing values and keeps smooth transitions across the bin boundaries, while binary is best suited for detecting specific values in the data. Both are great at providing features to models that otherwise lack non-linear pattern detection." + }, + { + "prompt_type": "plain", + "instruction": ": What does binner_include_original do? : binner include original config.toml: If enabled (default), include the original feature value as a output feature for the BinnerTransformer. This ensures that the BinnerTransformer never has less signal than the OriginalTransformer, since they can be chosen exclusively. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain binner_include_original. : binner include original config.toml: If enabled (default), include the original feature value as a output feature for the BinnerTransformer. 
This ensures that the BinnerTransformer never has less signal than the OriginalTransformer, since they can be chosen exclusively. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Include Original feature value as part of output of Binner Transformer: . : Set the binner include original config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner_include_original", + "output": "binner include original config.toml: If enabled (default), include the original feature value as a output feature for the BinnerTransformer. This ensures that the BinnerTransformer never has less signal than the OriginalTransformer, since they can be chosen exclusively. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner_include_original", + "output": "binner include original config.toml: Include Original feature value as part of output of Binner Transformer: If enabled (default), include the original feature value as a output feature for the BinnerTransformer. This ensures that the BinnerTransformer never has less signal than the OriginalTransformer, since they can be chosen exclusively. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner include original", + "output": "binner include original config.toml: Include Original feature value as part of output of Binner Transformer: If enabled (default), include the original feature value as a output feature for the BinnerTransformer. This ensures that the BinnerTransformer never has less signal than the OriginalTransformer, since they can be chosen exclusively. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Include Original feature value as part of output of Binner Transformer: ", + "output": "binner include original config.toml: Include Original feature value as part of output of Binner Transformer: If enabled (default), include the original feature value as a output feature for the BinnerTransformer. This ensures that the BinnerTransformer never has less signal than the OriginalTransformer, since they can be chosen exclusively. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting binner_include_original", + "output": "binner include original config.toml: If enabled (default), include the original feature value as a output feature for the BinnerTransformer. This ensures that the BinnerTransformer never has less signal than the OriginalTransformer, since they can be chosen exclusively. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting binner_include_original", + "output": "binner include original config.toml: Include Original feature value as part of output of Binner Transformer: If enabled (default), include the original feature value as a output feature for the BinnerTransformer. This ensures that the BinnerTransformer never has less signal than the OriginalTransformer, since they can be chosen exclusively. " + }, + { + "prompt_type": "plain", + "instruction": ": What does isolation_forest_nestimators do? : isolation forest nestimators config.toml: Num. Estimators for Isolation Forest Encoding: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain isolation_forest_nestimators. : isolation forest nestimators config.toml: Num. 
Estimators for Isolation Forest Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "isolation_forest_nestimators", + "output": "isolation forest nestimators config.toml: Num. Estimators for Isolation Forest Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "isolation_forest_nestimators", + "output": "isolation forest nestimators config.toml: Num. Estimators for Isolation Forest Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "isolation forest nestimators", + "output": "isolation forest nestimators config.toml: Num. Estimators for Isolation Forest Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Num. Estimators for Isolation Forest Encoding: ", + "output": "isolation forest nestimators config.toml: Num. Estimators for Isolation Forest Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting isolation_forest_nestimators", + "output": "isolation forest nestimators config.toml: Num. Estimators for Isolation Forest Encoding: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting isolation_forest_nestimators", + "output": "isolation forest nestimators config.toml: Num. Estimators for Isolation Forest Encoding: " + }, + { + "prompt_type": "plain", + "instruction": ": What does included_transformers do? 
: included transformers config.toml: Transformer display names to indicate which transformers to use in experiment.More information for these transformers can be viewed here:http://docs.h2o.ai/driverless-ai/latest-stable/docs/userguide/transformations.htmlThis section allows including/excluding these transformations and may be useful whensimpler (more interpretable) models are sought at the expense of accuracy.the interpretability setting)for multi-class: '['NumCatTETransformer', 'TextLinModelTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'ClusterDistTransformer','WeightOfEvidenceTransformer', 'TruncSVDNumTransformer', 'CVCatNumEncodeTransformer','DatesTransformer', 'TextTransformer', 'OriginalTransformer','NumToCatWoETransformer', 'NumToCatTETransformer', 'ClusterTETransformer','InteractionsTransformer']'for regression/binary: '['TextTransformer', 'ClusterDistTransformer','OriginalTransformer', 'TextLinModelTransformer', 'NumToCatTETransformer','DatesTransformer', 'WeightOfEvidenceTransformer', 'InteractionsTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'NumCatTETransformer','NumToCatWoETransformer', 'TruncSVDNumTransformer', 'ClusterTETransformer','CVCatNumEncodeTransformer']'This list appears in the experiment logs (search for 'Transformers used') " + }, + { + "prompt_type": "plain", + "instruction": ": Explain included_transformers. 
: included transformers config.toml: Transformer display names to indicate which transformers to use in experiment.More information for these transformers can be viewed here:http://docs.h2o.ai/driverless-ai/latest-stable/docs/userguide/transformations.htmlThis section allows including/excluding these transformations and may be useful whensimpler (more interpretable) models are sought at the expense of accuracy.the interpretability setting)for multi-class: '['NumCatTETransformer', 'TextLinModelTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'ClusterDistTransformer','WeightOfEvidenceTransformer', 'TruncSVDNumTransformer', 'CVCatNumEncodeTransformer','DatesTransformer', 'TextTransformer', 'OriginalTransformer','NumToCatWoETransformer', 'NumToCatTETransformer', 'ClusterTETransformer','InteractionsTransformer']'for regression/binary: '['TextTransformer', 'ClusterDistTransformer','OriginalTransformer', 'TextLinModelTransformer', 'NumToCatTETransformer','DatesTransformer', 'WeightOfEvidenceTransformer', 'InteractionsTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'NumCatTETransformer','NumToCatWoETransformer', 'TruncSVDNumTransformer', 'ClusterTETransformer','CVCatNumEncodeTransformer']'This list appears in the experiment logs (search for 'Transformers used') " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Include specific transformers: . 
: Set the included transformers config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included_transformers", + "output": "included transformers config.toml: Transformer display names to indicate which transformers to use in experiment.More information for these transformers can be viewed here:http://docs.h2o.ai/driverless-ai/latest-stable/docs/userguide/transformations.htmlThis section allows including/excluding these transformations and may be useful whensimpler (more interpretable) models are sought at the expense of accuracy.the interpretability setting)for multi-class: '['NumCatTETransformer', 'TextLinModelTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'ClusterDistTransformer','WeightOfEvidenceTransformer', 'TruncSVDNumTransformer', 'CVCatNumEncodeTransformer','DatesTransformer', 'TextTransformer', 'OriginalTransformer','NumToCatWoETransformer', 'NumToCatTETransformer', 'ClusterTETransformer','InteractionsTransformer']'for regression/binary: '['TextTransformer', 'ClusterDistTransformer','OriginalTransformer', 'TextLinModelTransformer', 'NumToCatTETransformer','DatesTransformer', 'WeightOfEvidenceTransformer', 'InteractionsTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'NumCatTETransformer','NumToCatWoETransformer', 'TruncSVDNumTransformer', 'ClusterTETransformer','CVCatNumEncodeTransformer']'This list appears in the experiment logs (search for 'Transformers used') " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included_transformers", + "output": "included transformers config.toml: Include specific transformers: Transformer display names to indicate which transformers to use in experiment.More information for these transformers can be viewed here:http://docs.h2o.ai/driverless-ai/latest-stable/docs/userguide/transformations.htmlThis section allows 
including/excluding these transformations and may be useful whensimpler (more interpretable) models are sought at the expense of accuracy.the interpretability setting)for multi-class: '['NumCatTETransformer', 'TextLinModelTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'ClusterDistTransformer','WeightOfEvidenceTransformer', 'TruncSVDNumTransformer', 'CVCatNumEncodeTransformer','DatesTransformer', 'TextTransformer', 'OriginalTransformer','NumToCatWoETransformer', 'NumToCatTETransformer', 'ClusterTETransformer','InteractionsTransformer']'for regression/binary: '['TextTransformer', 'ClusterDistTransformer','OriginalTransformer', 'TextLinModelTransformer', 'NumToCatTETransformer','DatesTransformer', 'WeightOfEvidenceTransformer', 'InteractionsTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'NumCatTETransformer','NumToCatWoETransformer', 'TruncSVDNumTransformer', 'ClusterTETransformer','CVCatNumEncodeTransformer']'This list appears in the experiment logs (search for 'Transformers used') " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included transformers", + "output": "included transformers config.toml: Include specific transformers: Transformer display names to indicate which transformers to use in experiment.More information for these transformers can be viewed here:http://docs.h2o.ai/driverless-ai/latest-stable/docs/userguide/transformations.htmlThis section allows including/excluding these transformations and may be useful whensimpler (more interpretable) models are sought at the expense of accuracy.the interpretability setting)for multi-class: '['NumCatTETransformer', 'TextLinModelTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'ClusterDistTransformer','WeightOfEvidenceTransformer', 'TruncSVDNumTransformer', 'CVCatNumEncodeTransformer','DatesTransformer', 'TextTransformer', 'OriginalTransformer','NumToCatWoETransformer', 
'NumToCatTETransformer', 'ClusterTETransformer','InteractionsTransformer']'for regression/binary: '['TextTransformer', 'ClusterDistTransformer','OriginalTransformer', 'TextLinModelTransformer', 'NumToCatTETransformer','DatesTransformer', 'WeightOfEvidenceTransformer', 'InteractionsTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'NumCatTETransformer','NumToCatWoETransformer', 'TruncSVDNumTransformer', 'ClusterTETransformer','CVCatNumEncodeTransformer']'This list appears in the experiment logs (search for 'Transformers used') " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Include specific transformers: ", + "output": "included transformers config.toml: Include specific transformers: Transformer display names to indicate which transformers to use in experiment.More information for these transformers can be viewed here:http://docs.h2o.ai/driverless-ai/latest-stable/docs/userguide/transformations.htmlThis section allows including/excluding these transformations and may be useful whensimpler (more interpretable) models are sought at the expense of accuracy.the interpretability setting)for multi-class: '['NumCatTETransformer', 'TextLinModelTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'ClusterDistTransformer','WeightOfEvidenceTransformer', 'TruncSVDNumTransformer', 'CVCatNumEncodeTransformer','DatesTransformer', 'TextTransformer', 'OriginalTransformer','NumToCatWoETransformer', 'NumToCatTETransformer', 'ClusterTETransformer','InteractionsTransformer']'for regression/binary: '['TextTransformer', 'ClusterDistTransformer','OriginalTransformer', 'TextLinModelTransformer', 'NumToCatTETransformer','DatesTransformer', 'WeightOfEvidenceTransformer', 'InteractionsTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'NumCatTETransformer','NumToCatWoETransformer', 'TruncSVDNumTransformer', 'ClusterTETransformer','CVCatNumEncodeTransformer']'This list 
appears in the experiment logs (search for 'Transformers used') " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting included_transformers", + "output": "included transformers config.toml: Transformer display names to indicate which transformers to use in experiment.More information for these transformers can be viewed here:http://docs.h2o.ai/driverless-ai/latest-stable/docs/userguide/transformations.htmlThis section allows including/excluding these transformations and may be useful whensimpler (more interpretable) models are sought at the expense of accuracy.the interpretability setting)for multi-class: '['NumCatTETransformer', 'TextLinModelTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'ClusterDistTransformer','WeightOfEvidenceTransformer', 'TruncSVDNumTransformer', 'CVCatNumEncodeTransformer','DatesTransformer', 'TextTransformer', 'OriginalTransformer','NumToCatWoETransformer', 'NumToCatTETransformer', 'ClusterTETransformer','InteractionsTransformer']'for regression/binary: '['TextTransformer', 'ClusterDistTransformer','OriginalTransformer', 'TextLinModelTransformer', 'NumToCatTETransformer','DatesTransformer', 'WeightOfEvidenceTransformer', 'InteractionsTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'NumCatTETransformer','NumToCatWoETransformer', 'TruncSVDNumTransformer', 'ClusterTETransformer','CVCatNumEncodeTransformer']'This list appears in the experiment logs (search for 'Transformers used') " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting included_transformers", + "output": "included transformers config.toml: Include specific transformers: Transformer display names to indicate which transformers to use in experiment.More information for these transformers can be viewed here:http://docs.h2o.ai/driverless-ai/latest-stable/docs/userguide/transformations.htmlThis section allows including/excluding these 
transformations and may be useful whensimpler (more interpretable) models are sought at the expense of accuracy.the interpretability setting)for multi-class: '['NumCatTETransformer', 'TextLinModelTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'ClusterDistTransformer','WeightOfEvidenceTransformer', 'TruncSVDNumTransformer', 'CVCatNumEncodeTransformer','DatesTransformer', 'TextTransformer', 'OriginalTransformer','NumToCatWoETransformer', 'NumToCatTETransformer', 'ClusterTETransformer','InteractionsTransformer']'for regression/binary: '['TextTransformer', 'ClusterDistTransformer','OriginalTransformer', 'TextLinModelTransformer', 'NumToCatTETransformer','DatesTransformer', 'WeightOfEvidenceTransformer', 'InteractionsTransformer','FrequentTransformer', 'CVTargetEncodeTransformer', 'NumCatTETransformer','NumToCatWoETransformer', 'TruncSVDNumTransformer', 'ClusterTETransformer','CVCatNumEncodeTransformer']'This list appears in the experiment logs (search for 'Transformers used') " + }, + { + "prompt_type": "plain", + "instruction": ": What does excluded_transformers do? : excluded transformers config.toml: Auxiliary to included_transformers e.g. to disable all Target Encoding: excluded_transformers = '['NumCatTETransformer', 'CVTargetEncodeF', 'NumToCatTETransformer', 'ClusterTETransformer']'. Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain excluded_transformers. : excluded transformers config.toml: Auxiliary to included_transformers e.g. to disable all Target Encoding: excluded_transformers = '['NumCatTETransformer', 'CVTargetEncodeF', 'NumToCatTETransformer', 'ClusterTETransformer']'. Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Exclude specific transformers: . 
: Set the excluded transformers config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_transformers", + "output": "excluded transformers config.toml: Auxiliary to included_transformers e.g. to disable all Target Encoding: excluded_transformers = '['NumCatTETransformer', 'CVTargetEncodeF', 'NumToCatTETransformer', 'ClusterTETransformer']'. Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_transformers", + "output": "excluded transformers config.toml: Exclude specific transformers: Auxiliary to included_transformers e.g. to disable all Target Encoding: excluded_transformers = '['NumCatTETransformer', 'CVTargetEncodeF', 'NumToCatTETransformer', 'ClusterTETransformer']'. Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded transformers", + "output": "excluded transformers config.toml: Exclude specific transformers: Auxiliary to included_transformers e.g. to disable all Target Encoding: excluded_transformers = '['NumCatTETransformer', 'CVTargetEncodeF', 'NumToCatTETransformer', 'ClusterTETransformer']'. Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Exclude specific transformers: ", + "output": "excluded transformers config.toml: Exclude specific transformers: Auxiliary to included_transformers e.g. to disable all Target Encoding: excluded_transformers = '['NumCatTETransformer', 'CVTargetEncodeF', 'NumToCatTETransformer', 'ClusterTETransformer']'. 
Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting excluded_transformers", + "output": "excluded transformers config.toml: Auxiliary to included_transformers e.g. to disable all Target Encoding: excluded_transformers = '['NumCatTETransformer', 'CVTargetEncodeF', 'NumToCatTETransformer', 'ClusterTETransformer']'. Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting excluded_transformers", + "output": "excluded transformers config.toml: Exclude specific transformers: Auxiliary to included_transformers e.g. to disable all Target Encoding: excluded_transformers = '['NumCatTETransformer', 'CVTargetEncodeF', 'NumToCatTETransformer', 'ClusterTETransformer']'. Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "plain", + "instruction": ": What does excluded_genes do? : excluded genes config.toml: Exclude list of genes (i.e. 
genes (built on top of transformers) to not use,independent of the interpretability setting)Some transformers are used by multiple genes, so this allows different control over feature engineeringfor multi-class: '['InteractionsGene', 'WeightOfEvidenceGene','NumToCatTargetEncodeSingleGene', 'OriginalGene', 'TextGene', 'FrequentGene','NumToCatWeightOfEvidenceGene', 'NumToCatWeightOfEvidenceMonotonicGene', 'CvTargetEncodeSingleGene', 'DateGene', 'NumToCatTargetEncodeMultiGene', 'DateTimeGene', 'TextLinRegressorGene', 'ClusterIDTargetEncodeSingleGene','CvCatNumEncodeGene', 'TruncSvdNumGene', 'ClusterIDTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'CvTargetEncodeMultiGene', 'TextLinClassifierGene','NumCatTargetEncodeSingleGene', 'ClusterDistGene']'for regression/binary: '['CvTargetEncodeSingleGene', 'NumToCatTargetEncodeSingleGene','CvCatNumEncodeGene', 'ClusterIDTargetEncodeSingleGene', 'TextLinRegressorGene','CvTargetEncodeMultiGene', 'ClusterDistGene', 'OriginalGene', 'DateGene','ClusterIDTargetEncodeMultiGene', 'NumToCatTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'TextLinClassifierGene', 'WeightOfEvidenceGene','FrequentGene', 'TruncSvdNumGene', 'InteractionsGene', 'TextGene','DateTimeGene', 'NumToCatWeightOfEvidenceGene','NumToCatWeightOfEvidenceMonotonicGene', ''NumCatTargetEncodeSingleGene']'This list appears in the experiment logs (search for 'Genes used')e.g. to disable interaction gene, use: excluded_genes ='['InteractionsGene']'.Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain excluded_genes. : excluded genes config.toml: Exclude list of genes (i.e. 
genes (built on top of transformers) to not use,independent of the interpretability setting)Some transformers are used by multiple genes, so this allows different control over feature engineeringfor multi-class: '['InteractionsGene', 'WeightOfEvidenceGene','NumToCatTargetEncodeSingleGene', 'OriginalGene', 'TextGene', 'FrequentGene','NumToCatWeightOfEvidenceGene', 'NumToCatWeightOfEvidenceMonotonicGene', 'CvTargetEncodeSingleGene', 'DateGene', 'NumToCatTargetEncodeMultiGene', 'DateTimeGene', 'TextLinRegressorGene', 'ClusterIDTargetEncodeSingleGene','CvCatNumEncodeGene', 'TruncSvdNumGene', 'ClusterIDTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'CvTargetEncodeMultiGene', 'TextLinClassifierGene','NumCatTargetEncodeSingleGene', 'ClusterDistGene']'for regression/binary: '['CvTargetEncodeSingleGene', 'NumToCatTargetEncodeSingleGene','CvCatNumEncodeGene', 'ClusterIDTargetEncodeSingleGene', 'TextLinRegressorGene','CvTargetEncodeMultiGene', 'ClusterDistGene', 'OriginalGene', 'DateGene','ClusterIDTargetEncodeMultiGene', 'NumToCatTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'TextLinClassifierGene', 'WeightOfEvidenceGene','FrequentGene', 'TruncSvdNumGene', 'InteractionsGene', 'TextGene','DateTimeGene', 'NumToCatWeightOfEvidenceGene','NumToCatWeightOfEvidenceMonotonicGene', ''NumCatTargetEncodeSingleGene']'This list appears in the experiment logs (search for 'Genes used')e.g. to disable interaction gene, use: excluded_genes ='['InteractionsGene']'.Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Exclude specific genes: . : Set the excluded genes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_genes", + "output": "excluded genes config.toml: Exclude list of genes (i.e. 
genes (built on top of transformers) to not use,independent of the interpretability setting)Some transformers are used by multiple genes, so this allows different control over feature engineeringfor multi-class: '['InteractionsGene', 'WeightOfEvidenceGene','NumToCatTargetEncodeSingleGene', 'OriginalGene', 'TextGene', 'FrequentGene','NumToCatWeightOfEvidenceGene', 'NumToCatWeightOfEvidenceMonotonicGene', 'CvTargetEncodeSingleGene', 'DateGene', 'NumToCatTargetEncodeMultiGene', 'DateTimeGene', 'TextLinRegressorGene', 'ClusterIDTargetEncodeSingleGene','CvCatNumEncodeGene', 'TruncSvdNumGene', 'ClusterIDTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'CvTargetEncodeMultiGene', 'TextLinClassifierGene','NumCatTargetEncodeSingleGene', 'ClusterDistGene']'for regression/binary: '['CvTargetEncodeSingleGene', 'NumToCatTargetEncodeSingleGene','CvCatNumEncodeGene', 'ClusterIDTargetEncodeSingleGene', 'TextLinRegressorGene','CvTargetEncodeMultiGene', 'ClusterDistGene', 'OriginalGene', 'DateGene','ClusterIDTargetEncodeMultiGene', 'NumToCatTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'TextLinClassifierGene', 'WeightOfEvidenceGene','FrequentGene', 'TruncSvdNumGene', 'InteractionsGene', 'TextGene','DateTimeGene', 'NumToCatWeightOfEvidenceGene','NumToCatWeightOfEvidenceMonotonicGene', ''NumCatTargetEncodeSingleGene']'This list appears in the experiment logs (search for 'Genes used')e.g. to disable interaction gene, use: excluded_genes ='['InteractionsGene']'.Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_genes", + "output": "excluded genes config.toml: Exclude specific genes: Exclude list of genes (i.e. 
genes (built on top of transformers) to not use,independent of the interpretability setting)Some transformers are used by multiple genes, so this allows different control over feature engineeringfor multi-class: '['InteractionsGene', 'WeightOfEvidenceGene','NumToCatTargetEncodeSingleGene', 'OriginalGene', 'TextGene', 'FrequentGene','NumToCatWeightOfEvidenceGene', 'NumToCatWeightOfEvidenceMonotonicGene', 'CvTargetEncodeSingleGene', 'DateGene', 'NumToCatTargetEncodeMultiGene', 'DateTimeGene', 'TextLinRegressorGene', 'ClusterIDTargetEncodeSingleGene','CvCatNumEncodeGene', 'TruncSvdNumGene', 'ClusterIDTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'CvTargetEncodeMultiGene', 'TextLinClassifierGene','NumCatTargetEncodeSingleGene', 'ClusterDistGene']'for regression/binary: '['CvTargetEncodeSingleGene', 'NumToCatTargetEncodeSingleGene','CvCatNumEncodeGene', 'ClusterIDTargetEncodeSingleGene', 'TextLinRegressorGene','CvTargetEncodeMultiGene', 'ClusterDistGene', 'OriginalGene', 'DateGene','ClusterIDTargetEncodeMultiGene', 'NumToCatTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'TextLinClassifierGene', 'WeightOfEvidenceGene','FrequentGene', 'TruncSvdNumGene', 'InteractionsGene', 'TextGene','DateTimeGene', 'NumToCatWeightOfEvidenceGene','NumToCatWeightOfEvidenceMonotonicGene', ''NumCatTargetEncodeSingleGene']'This list appears in the experiment logs (search for 'Genes used')e.g. to disable interaction gene, use: excluded_genes ='['InteractionsGene']'.Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded genes", + "output": "excluded genes config.toml: Exclude specific genes: Exclude list of genes (i.e. 
genes (built on top of transformers) to not use,independent of the interpretability setting)Some transformers are used by multiple genes, so this allows different control over feature engineeringfor multi-class: '['InteractionsGene', 'WeightOfEvidenceGene','NumToCatTargetEncodeSingleGene', 'OriginalGene', 'TextGene', 'FrequentGene','NumToCatWeightOfEvidenceGene', 'NumToCatWeightOfEvidenceMonotonicGene', 'CvTargetEncodeSingleGene', 'DateGene', 'NumToCatTargetEncodeMultiGene', 'DateTimeGene', 'TextLinRegressorGene', 'ClusterIDTargetEncodeSingleGene','CvCatNumEncodeGene', 'TruncSvdNumGene', 'ClusterIDTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'CvTargetEncodeMultiGene', 'TextLinClassifierGene','NumCatTargetEncodeSingleGene', 'ClusterDistGene']'for regression/binary: '['CvTargetEncodeSingleGene', 'NumToCatTargetEncodeSingleGene','CvCatNumEncodeGene', 'ClusterIDTargetEncodeSingleGene', 'TextLinRegressorGene','CvTargetEncodeMultiGene', 'ClusterDistGene', 'OriginalGene', 'DateGene','ClusterIDTargetEncodeMultiGene', 'NumToCatTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'TextLinClassifierGene', 'WeightOfEvidenceGene','FrequentGene', 'TruncSvdNumGene', 'InteractionsGene', 'TextGene','DateTimeGene', 'NumToCatWeightOfEvidenceGene','NumToCatWeightOfEvidenceMonotonicGene', ''NumCatTargetEncodeSingleGene']'This list appears in the experiment logs (search for 'Genes used')e.g. to disable interaction gene, use: excluded_genes ='['InteractionsGene']'.Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Exclude specific genes: ", + "output": "excluded genes config.toml: Exclude specific genes: Exclude list of genes (i.e. 
genes (built on top of transformers) to not use,independent of the interpretability setting)Some transformers are used by multiple genes, so this allows different control over feature engineeringfor multi-class: '['InteractionsGene', 'WeightOfEvidenceGene','NumToCatTargetEncodeSingleGene', 'OriginalGene', 'TextGene', 'FrequentGene','NumToCatWeightOfEvidenceGene', 'NumToCatWeightOfEvidenceMonotonicGene', 'CvTargetEncodeSingleGene', 'DateGene', 'NumToCatTargetEncodeMultiGene', 'DateTimeGene', 'TextLinRegressorGene', 'ClusterIDTargetEncodeSingleGene','CvCatNumEncodeGene', 'TruncSvdNumGene', 'ClusterIDTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'CvTargetEncodeMultiGene', 'TextLinClassifierGene','NumCatTargetEncodeSingleGene', 'ClusterDistGene']'for regression/binary: '['CvTargetEncodeSingleGene', 'NumToCatTargetEncodeSingleGene','CvCatNumEncodeGene', 'ClusterIDTargetEncodeSingleGene', 'TextLinRegressorGene','CvTargetEncodeMultiGene', 'ClusterDistGene', 'OriginalGene', 'DateGene','ClusterIDTargetEncodeMultiGene', 'NumToCatTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'TextLinClassifierGene', 'WeightOfEvidenceGene','FrequentGene', 'TruncSvdNumGene', 'InteractionsGene', 'TextGene','DateTimeGene', 'NumToCatWeightOfEvidenceGene','NumToCatWeightOfEvidenceMonotonicGene', ''NumCatTargetEncodeSingleGene']'This list appears in the experiment logs (search for 'Genes used')e.g. to disable interaction gene, use: excluded_genes ='['InteractionsGene']'.Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting excluded_genes", + "output": "excluded genes config.toml: Exclude list of genes (i.e. 
genes (built on top of transformers) to not use,independent of the interpretability setting)Some transformers are used by multiple genes, so this allows different control over feature engineeringfor multi-class: '['InteractionsGene', 'WeightOfEvidenceGene','NumToCatTargetEncodeSingleGene', 'OriginalGene', 'TextGene', 'FrequentGene','NumToCatWeightOfEvidenceGene', 'NumToCatWeightOfEvidenceMonotonicGene', 'CvTargetEncodeSingleGene', 'DateGene', 'NumToCatTargetEncodeMultiGene', 'DateTimeGene', 'TextLinRegressorGene', 'ClusterIDTargetEncodeSingleGene','CvCatNumEncodeGene', 'TruncSvdNumGene', 'ClusterIDTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'CvTargetEncodeMultiGene', 'TextLinClassifierGene','NumCatTargetEncodeSingleGene', 'ClusterDistGene']'for regression/binary: '['CvTargetEncodeSingleGene', 'NumToCatTargetEncodeSingleGene','CvCatNumEncodeGene', 'ClusterIDTargetEncodeSingleGene', 'TextLinRegressorGene','CvTargetEncodeMultiGene', 'ClusterDistGene', 'OriginalGene', 'DateGene','ClusterIDTargetEncodeMultiGene', 'NumToCatTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'TextLinClassifierGene', 'WeightOfEvidenceGene','FrequentGene', 'TruncSvdNumGene', 'InteractionsGene', 'TextGene','DateTimeGene', 'NumToCatWeightOfEvidenceGene','NumToCatWeightOfEvidenceMonotonicGene', ''NumCatTargetEncodeSingleGene']'This list appears in the experiment logs (search for 'Genes used')e.g. to disable interaction gene, use: excluded_genes ='['InteractionsGene']'.Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting excluded_genes", + "output": "excluded genes config.toml: Exclude specific genes: Exclude list of genes (i.e. 
genes (built on top of transformers) to not use,independent of the interpretability setting)Some transformers are used by multiple genes, so this allows different control over feature engineeringfor multi-class: '['InteractionsGene', 'WeightOfEvidenceGene','NumToCatTargetEncodeSingleGene', 'OriginalGene', 'TextGene', 'FrequentGene','NumToCatWeightOfEvidenceGene', 'NumToCatWeightOfEvidenceMonotonicGene', 'CvTargetEncodeSingleGene', 'DateGene', 'NumToCatTargetEncodeMultiGene', 'DateTimeGene', 'TextLinRegressorGene', 'ClusterIDTargetEncodeSingleGene','CvCatNumEncodeGene', 'TruncSvdNumGene', 'ClusterIDTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'CvTargetEncodeMultiGene', 'TextLinClassifierGene','NumCatTargetEncodeSingleGene', 'ClusterDistGene']'for regression/binary: '['CvTargetEncodeSingleGene', 'NumToCatTargetEncodeSingleGene','CvCatNumEncodeGene', 'ClusterIDTargetEncodeSingleGene', 'TextLinRegressorGene','CvTargetEncodeMultiGene', 'ClusterDistGene', 'OriginalGene', 'DateGene','ClusterIDTargetEncodeMultiGene', 'NumToCatTargetEncodeMultiGene','NumCatTargetEncodeMultiGene', 'TextLinClassifierGene', 'WeightOfEvidenceGene','FrequentGene', 'TruncSvdNumGene', 'InteractionsGene', 'TextGene','DateTimeGene', 'NumToCatWeightOfEvidenceGene','NumToCatWeightOfEvidenceMonotonicGene', ''NumCatTargetEncodeSingleGene']'This list appears in the experiment logs (search for 'Genes used')e.g. to disable interaction gene, use: excluded_genes ='['InteractionsGene']'.Does not affect transformers used for preprocessing with included_pretransformers. " + }, + { + "prompt_type": "plain", + "instruction": ": What does included_models do? : included models config.toml: Include specific models: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain included_models. 
: included models config.toml: Include specific models: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included_models", + "output": "included models config.toml: Include specific models: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included_models", + "output": "included models config.toml: Include specific models: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included models", + "output": "included models config.toml: Include specific models: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Include specific models: ", + "output": "included models config.toml: Include specific models: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting included_models", + "output": "included models config.toml: Include specific models: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting included_models", + "output": "included models config.toml: Include specific models: " + }, + { + "prompt_type": "plain", + "instruction": ": What does excluded_models do? : excluded models config.toml: Auxiliary to included_models" + }, + { + "prompt_type": "plain", + "instruction": ": Explain excluded_models. : excluded models config.toml: Auxiliary to included_models" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Exclude specific models: . 
: Set the excluded models config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_models", + "output": "excluded models config.toml: Auxiliary to included_models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_models", + "output": "excluded models config.toml: Exclude specific models: Auxiliary to included_models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded models", + "output": "excluded models config.toml: Exclude specific models: Auxiliary to included_models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Exclude specific models: ", + "output": "excluded models config.toml: Exclude specific models: Auxiliary to included_models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting excluded_models", + "output": "excluded models config.toml: Auxiliary to included_models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting excluded_models", + "output": "excluded models config.toml: Exclude specific models: Auxiliary to included_models" + }, + { + "prompt_type": "plain", + "instruction": ": What does included_scorers do? : included scorers config.toml: Include specific scorers: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain included_scorers. 
: included scorers config.toml: Include specific scorers: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included_scorers", + "output": "included scorers config.toml: Include specific scorers: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included_scorers", + "output": "included scorers config.toml: Include specific scorers: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included scorers", + "output": "included scorers config.toml: Include specific scorers: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Include specific scorers: ", + "output": "included scorers config.toml: Include specific scorers: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting included_scorers", + "output": "included scorers config.toml: Include specific scorers: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting included_scorers", + "output": "included scorers config.toml: Include specific scorers: " + }, + { + "prompt_type": "plain", + "instruction": ": What does included_pretransformers do? 
: included pretransformers config.toml: Select transformers to be used for preprocessing before other transformers operate. Pre-processing transformers can potentially take any original features and output arbitrary features, which will then be used by the normal layer of transformers whose selection is controlled by toml included_transformers or via the GUI \"Include specific transformers\". Notes: 1) preprocessing transformers (and all other layers of transformers) are part of the python and (if applicable) mojo scoring packages. 2) any BYOR transformer recipe or native DAI transformer can be used as a preprocessing transformer. So, e.g., a preprocessing transformer can do interactions, string concatenations, date extractions as a preprocessing step, and next layer of Date and DateTime transformers will use that as input data. Caveats: 1) one cannot currently do a time-series experiment on a time_column that hasn't yet been made (setup of experiment only knows about original data, not transformed). However, one can use a run-time data recipe to (e.g.) convert a float date-time into string date-time, and this will be used by DAI's Date and DateTime transformers as well as auto-detection of time series. 2) in order to do a time series experiment with the GUI/client auto-selecting groups, periods, etc. the dataset must have time column and groups prepared ahead of experiment by user or via a one-time data recipe. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain included_pretransformers. 
: included pretransformers config.toml: Select transformers to be used for preprocessing before other transformers operate. Pre-processing transformers can potentially take any original features and output arbitrary features, which will then be used by the normal layer of transformers whose selection is controlled by toml included_transformers or via the GUI \"Include specific transformers\". Notes: 1) preprocessing transformers (and all other layers of transformers) are part of the python and (if applicable) mojo scoring packages. 2) any BYOR transformer recipe or native DAI transformer can be used as a preprocessing transformer. So, e.g., a preprocessing transformer can do interactions, string concatenations, date extractions as a preprocessing step, and next layer of Date and DateTime transformers will use that as input data. Caveats: 1) one cannot currently do a time-series experiment on a time_column that hasn't yet been made (setup of experiment only knows about original data, not transformed). However, one can use a run-time data recipe to (e.g.) convert a float date-time into string date-time, and this will be used by DAI's Date and DateTime transformers as well as auto-detection of time series. 2) in order to do a time series experiment with the GUI/client auto-selecting groups, periods, etc. the dataset must have time column and groups prepared ahead of experiment by user or via a one-time data recipe. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Include specific preprocessing transformers: . 
: Set the included pretransformers config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included_pretransformers", + "output": "included pretransformers config.toml: Select transformers to be used for preprocessing before other transformers operate.Pre-processing transformers can potentially take any original features and outputarbitrary features, which will then be used by the normal layer of transformerswhose selection is controlled by toml included_transformers or via the GUI\"Include specific transformers\".Notes:1) preprocessing transformers (and all other layers of transformers) are part of the python and (if applicable) mojo scoring packages.2) any BYOR transformer recipe or native DAI transformer can be used as a preprocessing transformer.So, e.g., a preprocessing transformer can do interactions, string concatenations, date extractions as a preprocessing step, and next layer of Date and DateTime transformers will use that as input data.Caveats:1) one cannot currently do a time-series experiment on a time_column that hasn't yet been made (setup of experiment only knows about original data, not transformed) However, one can use a run-time data recipe to (e.g.) convert a float date-time into string date-time, and this will be used by DAIs Date and DateTime transformers as well as auto-detection of time series.2) in order to do a time series experiment with the GUI/client auto-selecting groups, periods, etc. the dataset must have time column and groups prepared ahead of experiment by user or via a one-time data recipe. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included_pretransformers", + "output": "included pretransformers config.toml: Include specific preprocessing transformers: Select transformers to be used for preprocessing before other transformers operate.Pre-processing transformers can potentially take any original features and outputarbitrary features, which will then be used by the normal layer of transformerswhose selection is controlled by toml included_transformers or via the GUI\"Include specific transformers\".Notes:1) preprocessing transformers (and all other layers of transformers) are part of the python and (if applicable) mojo scoring packages.2) any BYOR transformer recipe or native DAI transformer can be used as a preprocessing transformer.So, e.g., a preprocessing transformer can do interactions, string concatenations, date extractions as a preprocessing step, and next layer of Date and DateTime transformers will use that as input data.Caveats:1) one cannot currently do a time-series experiment on a time_column that hasn't yet been made (setup of experiment only knows about original data, not transformed) However, one can use a run-time data recipe to (e.g.) convert a float date-time into string date-time, and this will be used by DAIs Date and DateTime transformers as well as auto-detection of time series.2) in order to do a time series experiment with the GUI/client auto-selecting groups, periods, etc. the dataset must have time column and groups prepared ahead of experiment by user or via a one-time data recipe. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included pretransformers", + "output": "included pretransformers config.toml: Include specific preprocessing transformers: Select transformers to be used for preprocessing before other transformers operate.Pre-processing transformers can potentially take any original features and outputarbitrary features, which will then be used by the normal layer of transformerswhose selection is controlled by toml included_transformers or via the GUI\"Include specific transformers\".Notes:1) preprocessing transformers (and all other layers of transformers) are part of the python and (if applicable) mojo scoring packages.2) any BYOR transformer recipe or native DAI transformer can be used as a preprocessing transformer.So, e.g., a preprocessing transformer can do interactions, string concatenations, date extractions as a preprocessing step, and next layer of Date and DateTime transformers will use that as input data.Caveats:1) one cannot currently do a time-series experiment on a time_column that hasn't yet been made (setup of experiment only knows about original data, not transformed) However, one can use a run-time data recipe to (e.g.) convert a float date-time into string date-time, and this will be used by DAIs Date and DateTime transformers as well as auto-detection of time series.2) in order to do a time series experiment with the GUI/client auto-selecting groups, periods, etc. the dataset must have time column and groups prepared ahead of experiment by user or via a one-time data recipe. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Include specific preprocessing transformers: ", + "output": "included pretransformers config.toml: Include specific preprocessing transformers: Select transformers to be used for preprocessing before other transformers operate.Pre-processing transformers can potentially take any original features and outputarbitrary features, which will then be used by the normal layer of transformerswhose selection is controlled by toml included_transformers or via the GUI\"Include specific transformers\".Notes:1) preprocessing transformers (and all other layers of transformers) are part of the python and (if applicable) mojo scoring packages.2) any BYOR transformer recipe or native DAI transformer can be used as a preprocessing transformer.So, e.g., a preprocessing transformer can do interactions, string concatenations, date extractions as a preprocessing step, and next layer of Date and DateTime transformers will use that as input data.Caveats:1) one cannot currently do a time-series experiment on a time_column that hasn't yet been made (setup of experiment only knows about original data, not transformed) However, one can use a run-time data recipe to (e.g.) convert a float date-time into string date-time, and this will be used by DAIs Date and DateTime transformers as well as auto-detection of time series.2) in order to do a time series experiment with the GUI/client auto-selecting groups, periods, etc. the dataset must have time column and groups prepared ahead of experiment by user or via a one-time data recipe. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting included_pretransformers", + "output": "included pretransformers config.toml: Select transformers to be used for preprocessing before other transformers operate.Pre-processing transformers can potentially take any original features and outputarbitrary features, which will then be used by the normal layer of transformerswhose selection is controlled by toml included_transformers or via the GUI\"Include specific transformers\".Notes:1) preprocessing transformers (and all other layers of transformers) are part of the python and (if applicable) mojo scoring packages.2) any BYOR transformer recipe or native DAI transformer can be used as a preprocessing transformer.So, e.g., a preprocessing transformer can do interactions, string concatenations, date extractions as a preprocessing step, and next layer of Date and DateTime transformers will use that as input data.Caveats:1) one cannot currently do a time-series experiment on a time_column that hasn't yet been made (setup of experiment only knows about original data, not transformed) However, one can use a run-time data recipe to (e.g.) convert a float date-time into string date-time, and this will be used by DAIs Date and DateTime transformers as well as auto-detection of time series.2) in order to do a time series experiment with the GUI/client auto-selecting groups, periods, etc. the dataset must have time column and groups prepared ahead of experiment by user or via a one-time data recipe. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting included_pretransformers", + "output": "included pretransformers config.toml: Include specific preprocessing transformers: Select transformers to be used for preprocessing before other transformers operate.Pre-processing transformers can potentially take any original features and outputarbitrary features, which will then be used by the normal layer of transformerswhose selection is controlled by toml included_transformers or via the GUI\"Include specific transformers\".Notes:1) preprocessing transformers (and all other layers of transformers) are part of the python and (if applicable) mojo scoring packages.2) any BYOR transformer recipe or native DAI transformer can be used as a preprocessing transformer.So, e.g., a preprocessing transformer can do interactions, string concatenations, date extractions as a preprocessing step, and next layer of Date and DateTime transformers will use that as input data.Caveats:1) one cannot currently do a time-series experiment on a time_column that hasn't yet been made (setup of experiment only knows about original data, not transformed) However, one can use a run-time data recipe to (e.g.) convert a float date-time into string date-time, and this will be used by DAIs Date and DateTime transformers as well as auto-detection of time series.2) in order to do a time series experiment with the GUI/client auto-selecting groups, periods, etc. the dataset must have time column and groups prepared ahead of experiment by user or via a one-time data recipe. " + }, + { + "prompt_type": "plain", + "instruction": ": What does excluded_pretransformers do? : excluded pretransformers config.toml: Auxiliary to included_pretransformers" + }, + { + "prompt_type": "plain", + "instruction": ": Explain excluded_pretransformers. 
: excluded pretransformers config.toml: Auxiliary to included_pretransformers" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Exclude specific pretransformers: . : Set the excluded pretransformers config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_pretransformers", + "output": "excluded pretransformers config.toml: Auxiliary to included_pretransformers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_pretransformers", + "output": "excluded pretransformers config.toml: Exclude specific pretransformers: Auxiliary to included_pretransformers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded pretransformers", + "output": "excluded pretransformers config.toml: Exclude specific pretransformers: Auxiliary to included_pretransformers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Exclude specific pretransformers: ", + "output": "excluded pretransformers config.toml: Exclude specific pretransformers: Auxiliary to included_pretransformers" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting excluded_pretransformers", + "output": "excluded pretransformers config.toml: Auxiliary to included_pretransformers" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting excluded_pretransformers", + "output": "excluded pretransformers config.toml: Exclude specific pretransformers: Auxiliary to included_pretransformers" + }, + { + "prompt_type": "plain", + "instruction": ": What does num_pipeline_layers do? 
: num pipeline layers config.toml: Number of full pipeline layers (not including preprocessing layer when included_pretransformers is not empty). " + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_pipeline_layers. : num pipeline layers config.toml: Number of full pipeline layers (not including preprocessing layer when included_pretransformers is not empty). " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of pipeline layers: . : Set the num pipeline layers config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_pipeline_layers", + "output": "num pipeline layers config.toml: Number of full pipeline layers (not including preprocessing layer when included_pretransformers is not empty). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_pipeline_layers", + "output": "num pipeline layers config.toml: Number of pipeline layers: Number of full pipeline layers (not including preprocessing layer when included_pretransformers is not empty). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num pipeline layers", + "output": "num pipeline layers config.toml: Number of pipeline layers: Number of full pipeline layers (not including preprocessing layer when included_pretransformers is not empty). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of pipeline layers: ", + "output": "num pipeline layers config.toml: Number of pipeline layers: Number of full pipeline layers (not including preprocessing layer when included_pretransformers is not empty). 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_pipeline_layers", + "output": "num pipeline layers config.toml: Number of full pipeline layers (not including preprocessing layer when included_pretransformers is not empty). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_pipeline_layers", + "output": "num pipeline layers config.toml: Number of pipeline layers: Number of full pipeline layers (not including preprocessing layer when included_pretransformers is not empty). " + }, + { + "prompt_type": "plain", + "instruction": ": What does included_datas do? : included datas config.toml: There are 2 data recipes:1) that adds new dataset or modifies dataset outside experiment by file/url (pre-experiment data recipe)2) that modifies dataset during experiment and python scoring (run-time data recipe)This list applies to the 2nd case. One can use the same data recipe code for either case, but note:A) the 1st case can make any new data, but is not part of scoring package.B) the 2nd case modifies data during the experiment, so needs some original dataset. The recipe can still create all new features, as long as it has same *name* for: target, weight_column, fold_column, time_column, time group columns. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain included_datas. : included datas config.toml: There are 2 data recipes:1) that adds new dataset or modifies dataset outside experiment by file/url (pre-experiment data recipe)2) that modifies dataset during experiment and python scoring (run-time data recipe)This list applies to the 2nd case. One can use the same data recipe code for either case, but note:A) the 1st case can make any new data, but is not part of scoring package.B) the 2nd case modifies data during the experiment, so needs some original dataset. 
The recipe can still create all new features, as long as it has same *name* for: target, weight_column, fold_column, time_column, time group columns. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Include specific data recipes during experiment: . : Set the included datas config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included_datas", + "output": "included datas config.toml: There are 2 data recipes:1) that adds new dataset or modifies dataset outside experiment by file/url (pre-experiment data recipe)2) that modifies dataset during experiment and python scoring (run-time data recipe)This list applies to the 2nd case. One can use the same data recipe code for either case, but note:A) the 1st case can make any new data, but is not part of scoring package.B) the 2nd case modifies data during the experiment, so needs some original dataset. The recipe can still create all new features, as long as it has same *name* for: target, weight_column, fold_column, time_column, time group columns. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included_datas", + "output": "included datas config.toml: Include specific data recipes during experiment: There are 2 data recipes:1) that adds new dataset or modifies dataset outside experiment by file/url (pre-experiment data recipe)2) that modifies dataset during experiment and python scoring (run-time data recipe)This list applies to the 2nd case. One can use the same data recipe code for either case, but note:A) the 1st case can make any new data, but is not part of scoring package.B) the 2nd case modifies data during the experiment, so needs some original dataset. The recipe can still create all new features, as long as it has same *name* for: target, weight_column, fold_column, time_column, time group columns. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included datas", + "output": "included datas config.toml: Include specific data recipes during experiment: There are 2 data recipes:1) that adds new dataset or modifies dataset outside experiment by file/url (pre-experiment data recipe)2) that modifies dataset during experiment and python scoring (run-time data recipe)This list applies to the 2nd case. One can use the same data recipe code for either case, but note:A) the 1st case can make any new data, but is not part of scoring package.B) the 2nd case modifies data during the experiment, so needs some original dataset. The recipe can still create all new features, as long as it has same *name* for: target, weight_column, fold_column, time_column, time group columns. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Include specific data recipes during experiment: ", + "output": "included datas config.toml: Include specific data recipes during experiment: There are 2 data recipes:1) that adds new dataset or modifies dataset outside experiment by file/url (pre-experiment data recipe)2) that modifies dataset during experiment and python scoring (run-time data recipe)This list applies to the 2nd case. One can use the same data recipe code for either case, but note:A) the 1st case can make any new data, but is not part of scoring package.B) the 2nd case modifies data during the experiment, so needs some original dataset. The recipe can still create all new features, as long as it has same *name* for: target, weight_column, fold_column, time_column, time group columns. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting included_datas", + "output": "included datas config.toml: There are 2 data recipes:1) that adds new dataset or modifies dataset outside experiment by file/url (pre-experiment data recipe)2) that modifies dataset during experiment and python scoring (run-time data recipe)This list applies to the 2nd case. One can use the same data recipe code for either case, but note:A) the 1st case can make any new data, but is not part of scoring package.B) the 2nd case modifies data during the experiment, so needs some original dataset. The recipe can still create all new features, as long as it has same *name* for: target, weight_column, fold_column, time_column, time group columns. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting included_datas", + "output": "included datas config.toml: Include specific data recipes during experiment: There are 2 data recipes:1) that adds new dataset or modifies dataset outside experiment by file/url (pre-experiment data recipe)2) that modifies dataset during experiment and python scoring (run-time data recipe)This list applies to the 2nd case. One can use the same data recipe code for either case, but note:A) the 1st case can make any new data, but is not part of scoring package.B) the 2nd case modifies data during the experiment, so needs some original dataset. The recipe can still create all new features, as long as it has same *name* for: target, weight_column, fold_column, time_column, time group columns. " + }, + { + "prompt_type": "plain", + "instruction": ": What does excluded_datas do? : excluded datas config.toml: Auxiliary to included_datas" + }, + { + "prompt_type": "plain", + "instruction": ": Explain excluded_datas. 
: excluded datas config.toml: Auxiliary to included_datas" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Exclude specific data recipes: . : Set the excluded datas config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_datas", + "output": "excluded datas config.toml: Auxiliary to included_datas" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_datas", + "output": "excluded datas config.toml: Exclude specific data recipes: Auxiliary to included_datas" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded datas", + "output": "excluded datas config.toml: Exclude specific data recipes: Auxiliary to included_datas" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Exclude specific data recipes: ", + "output": "excluded datas config.toml: Exclude specific data recipes: Auxiliary to included_datas" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting excluded_datas", + "output": "excluded datas config.toml: Auxiliary to included_datas" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting excluded_datas", + "output": "excluded datas config.toml: Exclude specific data recipes: Auxiliary to included_datas" + }, + { + "prompt_type": "plain", + "instruction": ": What does included_individuals do? 
: included individuals config.toml: Custom individuals to use in experiment. DAI contains most information about model type, model hyperparameters, data science types for input features, transformers used, and transformer parameters in an Individual Recipe (an object that is evolved by mutation within the context of DAI's genetic algorithm). Every completed experiment auto-generates python code for the experiment that corresponds to the individual(s) used to build the final model. This auto-generated python code can be edited offline and uploaded as a recipe, or it can be edited within the custom recipe management editor and saved. This allows one code-first access to a significant portion of DAI's internal transformer and model generation. Choices are: * Empty means all individuals are freshly generated and treated by DAI's AutoML as a container of model and transformer choices. * Recipe display names of custom individuals, usually chosen via the UI. If the number of included custom individuals is less than DAI would need, then the remaining individuals are freshly generated. The expert experiment-level option fixed_num_individuals can be used to enforce how many individuals to use in evolution stage. The expert experiment-level option fixed_ensemble_level can be used to enforce how many individuals (each with one base model) will be used in the final model. These individuals act in a similar way as the feature brain acts for restart and retrain/refit, and one can retrain/refit custom individuals (i.e. skip the tuning and evolution stages) to use them in building a final model. See toml make_python_code for more details." + }, + { + "prompt_type": "plain", + "instruction": ": Explain included_individuals. 
: included individuals config.toml: Custom individuals to use in experiment. DAI contains most information about model type, model hyperparameters, data science types for input features, transformers used, and transformer parameters in an Individual Recipe (an object that is evolved by mutation within the context of DAI's genetic algorithm). Every completed experiment auto-generates python code for the experiment that corresponds to the individual(s) used to build the final model. This auto-generated python code can be edited offline and uploaded as a recipe, or it can be edited within the custom recipe management editor and saved. This allows one code-first access to a significant portion of DAI's internal transformer and model generation. Choices are: * Empty means all individuals are freshly generated and treated by DAI's AutoML as a container of model and transformer choices. * Recipe display names of custom individuals, usually chosen via the UI. If the number of included custom individuals is less than DAI would need, then the remaining individuals are freshly generated. The expert experiment-level option fixed_num_individuals can be used to enforce how many individuals to use in evolution stage. The expert experiment-level option fixed_ensemble_level can be used to enforce how many individuals (each with one base model) will be used in the final model. These individuals act in a similar way as the feature brain acts for restart and retrain/refit, and one can retrain/refit custom individuals (i.e. skip the tuning and evolution stages) to use them in building a final model. See toml make_python_code for more details." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Include specific individuals: . 
: Set the included individuals config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included_individuals", + "output": "included individuals config.toml: Custom individuals to use in experiment.DAI contains most information about model type, model hyperparameters, data science types for input features, transformers used, and transformer parameters an Individual Recipe (an object that is evolved by mutation within the context of DAI's genetic algorithm).Every completed experiment auto-generates python code for the experiment that corresponds to the individual(s) used to build the final model. This auto-generated python code can be edited offline and uploaded as a recipe, or it can be edited within the custom recipe management editor and saved. This allowed one a code-first access to a significant portion of DAI's internal transformer and model generation.Choices are:* Empty means all individuals are freshly generated and treated by DAI's AutoML as a container of model and transformer choices.* Recipe display names of custom individuals, usually chosen via the UI. If the number of included custom individuals is less than DAI would need, then the remaining individuals are freshly generated.The expert experiment-level option fixed_num_individuals can be used to enforce how many individuals to use in evolution stage.The expert experiment-level option fixed_ensemble_level can be used to enforce how many individuals (each with one base model) will be used in the final model.These individuals act in similar way as the feature brain acts for restart and retrain/refit, and one can retrain/refit custom individuals (i.e. skip the tuning and evolution stages) to use them in building a final model.See toml make_python_code for more details." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included_individuals", + "output": "included individuals config.toml: Include specific individuals: Custom individuals to use in experiment.DAI contains most information about model type, model hyperparameters, data science types for input features, transformers used, and transformer parameters an Individual Recipe (an object that is evolved by mutation within the context of DAI's genetic algorithm).Every completed experiment auto-generates python code for the experiment that corresponds to the individual(s) used to build the final model. This auto-generated python code can be edited offline and uploaded as a recipe, or it can be edited within the custom recipe management editor and saved. This allowed one a code-first access to a significant portion of DAI's internal transformer and model generation.Choices are:* Empty means all individuals are freshly generated and treated by DAI's AutoML as a container of model and transformer choices.* Recipe display names of custom individuals, usually chosen via the UI. If the number of included custom individuals is less than DAI would need, then the remaining individuals are freshly generated.The expert experiment-level option fixed_num_individuals can be used to enforce how many individuals to use in evolution stage.The expert experiment-level option fixed_ensemble_level can be used to enforce how many individuals (each with one base model) will be used in the final model.These individuals act in similar way as the feature brain acts for restart and retrain/refit, and one can retrain/refit custom individuals (i.e. skip the tuning and evolution stages) to use them in building a final model.See toml make_python_code for more details." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "included individuals", + "output": "included individuals config.toml: Include specific individuals: Custom individuals to use in experiment.DAI contains most information about model type, model hyperparameters, data science types for input features, transformers used, and transformer parameters an Individual Recipe (an object that is evolved by mutation within the context of DAI's genetic algorithm).Every completed experiment auto-generates python code for the experiment that corresponds to the individual(s) used to build the final model. This auto-generated python code can be edited offline and uploaded as a recipe, or it can be edited within the custom recipe management editor and saved. This allowed one a code-first access to a significant portion of DAI's internal transformer and model generation.Choices are:* Empty means all individuals are freshly generated and treated by DAI's AutoML as a container of model and transformer choices.* Recipe display names of custom individuals, usually chosen via the UI. If the number of included custom individuals is less than DAI would need, then the remaining individuals are freshly generated.The expert experiment-level option fixed_num_individuals can be used to enforce how many individuals to use in evolution stage.The expert experiment-level option fixed_ensemble_level can be used to enforce how many individuals (each with one base model) will be used in the final model.These individuals act in similar way as the feature brain acts for restart and retrain/refit, and one can retrain/refit custom individuals (i.e. skip the tuning and evolution stages) to use them in building a final model.See toml make_python_code for more details." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Include specific individuals: ", + "output": "included individuals config.toml: Include specific individuals: Custom individuals to use in experiment.DAI contains most information about model type, model hyperparameters, data science types for input features, transformers used, and transformer parameters an Individual Recipe (an object that is evolved by mutation within the context of DAI's genetic algorithm).Every completed experiment auto-generates python code for the experiment that corresponds to the individual(s) used to build the final model. This auto-generated python code can be edited offline and uploaded as a recipe, or it can be edited within the custom recipe management editor and saved. This allowed one a code-first access to a significant portion of DAI's internal transformer and model generation.Choices are:* Empty means all individuals are freshly generated and treated by DAI's AutoML as a container of model and transformer choices.* Recipe display names of custom individuals, usually chosen via the UI. If the number of included custom individuals is less than DAI would need, then the remaining individuals are freshly generated.The expert experiment-level option fixed_num_individuals can be used to enforce how many individuals to use in evolution stage.The expert experiment-level option fixed_ensemble_level can be used to enforce how many individuals (each with one base model) will be used in the final model.These individuals act in similar way as the feature brain acts for restart and retrain/refit, and one can retrain/refit custom individuals (i.e. skip the tuning and evolution stages) to use them in building a final model.See toml make_python_code for more details." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting included_individuals", + "output": "included individuals config.toml: Custom individuals to use in experiment.DAI contains most information about model type, model hyperparameters, data science types for input features, transformers used, and transformer parameters an Individual Recipe (an object that is evolved by mutation within the context of DAI's genetic algorithm).Every completed experiment auto-generates python code for the experiment that corresponds to the individual(s) used to build the final model. This auto-generated python code can be edited offline and uploaded as a recipe, or it can be edited within the custom recipe management editor and saved. This allowed one a code-first access to a significant portion of DAI's internal transformer and model generation.Choices are:* Empty means all individuals are freshly generated and treated by DAI's AutoML as a container of model and transformer choices.* Recipe display names of custom individuals, usually chosen via the UI. If the number of included custom individuals is less than DAI would need, then the remaining individuals are freshly generated.The expert experiment-level option fixed_num_individuals can be used to enforce how many individuals to use in evolution stage.The expert experiment-level option fixed_ensemble_level can be used to enforce how many individuals (each with one base model) will be used in the final model.These individuals act in similar way as the feature brain acts for restart and retrain/refit, and one can retrain/refit custom individuals (i.e. skip the tuning and evolution stages) to use them in building a final model.See toml make_python_code for more details." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting included_individuals", + "output": "included individuals config.toml: Include specific individuals: Custom individuals to use in experiment.DAI contains most information about model type, model hyperparameters, data science types for input features, transformers used, and transformer parameters an Individual Recipe (an object that is evolved by mutation within the context of DAI's genetic algorithm).Every completed experiment auto-generates python code for the experiment that corresponds to the individual(s) used to build the final model. This auto-generated python code can be edited offline and uploaded as a recipe, or it can be edited within the custom recipe management editor and saved. This allowed one a code-first access to a significant portion of DAI's internal transformer and model generation.Choices are:* Empty means all individuals are freshly generated and treated by DAI's AutoML as a container of model and transformer choices.* Recipe display names of custom individuals, usually chosen via the UI. If the number of included custom individuals is less than DAI would need, then the remaining individuals are freshly generated.The expert experiment-level option fixed_num_individuals can be used to enforce how many individuals to use in evolution stage.The expert experiment-level option fixed_ensemble_level can be used to enforce how many individuals (each with one base model) will be used in the final model.These individuals act in similar way as the feature brain acts for restart and retrain/refit, and one can retrain/refit custom individuals (i.e. skip the tuning and evolution stages) to use them in building a final model.See toml make_python_code for more details." + }, + { + "prompt_type": "plain", + "instruction": ": What does excluded_individuals do? 
: excluded individuals config.toml: Auxiliary to included_individuals" + }, + { + "prompt_type": "plain", + "instruction": ": Explain excluded_individuals. : excluded individuals config.toml: Auxiliary to included_individuals" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Exclude specific individual recipes: . : Set the excluded individuals config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_individuals", + "output": "excluded individuals config.toml: Auxiliary to included_individuals" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_individuals", + "output": "excluded individuals config.toml: Exclude specific individual recipes: Auxiliary to included_individuals" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded individuals", + "output": "excluded individuals config.toml: Exclude specific individual recipes: Auxiliary to included_individuals" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Exclude specific individual recipes: ", + "output": "excluded individuals config.toml: Exclude specific individual recipes: Auxiliary to included_individuals" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting excluded_individuals", + "output": "excluded individuals config.toml: Auxiliary to included_individuals" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting excluded_individuals", + "output": "excluded individuals config.toml: Exclude specific individual recipes: Auxiliary to included_individuals" + }, + { + "prompt_type": "plain", + "instruction": ": What does make_python_code do? 
: make python code config.toml: Whether to generate python code for the best individuals for the experiment.This python code contains a CustomIndividual class that is a recipe that can be edited and customized. The CustomIndividual class itself can also be customized for expert use.By default, 'auto' means on.At the end of an experiment, the summary zip contains auto-generated python code for the individuals used in the experiment, including the last best population (best_population_indivXX.py where XX iterates the population), last best individual (best_individual.py), final base models (final_indivYY.py where YY iterates the final base models).The summary zip also contains an example_indiv.py file that generates other transformers that may be useful that did not happen to be used in the experiment.In addition, the GUI and python client allow one to generate custom individuals from an aborted or finished experiment.For finished experiments, this will provide a zip file containing the final_indivYY.py files, and for aborted experiments this will contain the best population and best individual files.See included_individuals for more details." + }, + { + "prompt_type": "plain", + "instruction": ": Explain make_python_code. : make python code config.toml: Whether to generate python code for the best individuals for the experiment.This python code contains a CustomIndividual class that is a recipe that can be edited and customized. 
The CustomIndividual class itself can also be customized for expert use.By default, 'auto' means on.At the end of an experiment, the summary zip contains auto-generated python code for the individuals used in the experiment, including the last best population (best_population_indivXX.py where XX iterates the population), last best individual (best_individual.py), final base models (final_indivYY.py where YY iterates the final base models).The summary zip also contains an example_indiv.py file that generates other transformers that may be useful that did not happen to be used in the experiment.In addition, the GUI and python client allow one to generate custom individuals from an aborted or finished experiment.For finished experiments, this will provide a zip file containing the final_indivYY.py files, and for aborted experiments this will contain the best population and best individual files.See included_individuals for more details." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Generate python code for individual: . : Set the make python code config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_python_code", + "output": "make python code config.toml: Whether to generate python code for the best individuals for the experiment.This python code contains a CustomIndividual class that is a recipe that can be edited and customized. 
The CustomIndividual class itself can also be customized for expert use.By default, 'auto' means on.At the end of an experiment, the summary zip contains auto-generated python code for the individuals used in the experiment, including the last best population (best_population_indivXX.py where XX iterates the population), last best individual (best_individual.py), final base models (final_indivYY.py where YY iterates the final base models).The summary zip also contains an example_indiv.py file that generates other transformers that may be useful that did not happen to be used in the experiment.In addition, the GUI and python client allow one to generate custom individuals from an aborted or finished experiment.For finished experiments, this will provide a zip file containing the final_indivYY.py files, and for aborted experiments this will contain the best population and best individual files.See included_individuals for more details." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_python_code", + "output": "make python code config.toml: Generate python code for individual: Whether to generate python code for the best individuals for the experiment.This python code contains a CustomIndividual class that is a recipe that can be edited and customized. 
The CustomIndividual class itself can also be customized for expert use.By default, 'auto' means on.At the end of an experiment, the summary zip contains auto-generated python code for the individuals used in the experiment, including the last best population (best_population_indivXX.py where XX iterates the population), last best individual (best_individual.py), final base models (final_indivYY.py where YY iterates the final base models).The summary zip also contains an example_indiv.py file that generates other transformers that may be useful that did not happen to be used in the experiment.In addition, the GUI and python client allow one to generate custom individuals from an aborted or finished experiment.For finished experiments, this will provide a zip file containing the final_indivYY.py files, and for aborted experiments this will contain the best population and best individual files.See included_individuals for more details." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make python code", + "output": "make python code config.toml: Generate python code for individual: Whether to generate python code for the best individuals for the experiment.This python code contains a CustomIndividual class that is a recipe that can be edited and customized. 
The CustomIndividual class itself can also be customized for expert use.By default, 'auto' means on.At the end of an experiment, the summary zip contains auto-generated python code for the individuals used in the experiment, including the last best population (best_population_indivXX.py where XX iterates the population), last best individual (best_individual.py), final base models (final_indivYY.py where YY iterates the final base models).The summary zip also contains an example_indiv.py file that generates other transformers that may be useful that did not happen to be used in the experiment.In addition, the GUI and python client allow one to generate custom individuals from an aborted or finished experiment.For finished experiments, this will provide a zip file containing the final_indivYY.py files, and for aborted experiments this will contain the best population and best individual files.See included_individuals for more details." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Generate python code for individual: ", + "output": "make python code config.toml: Generate python code for individual: Whether to generate python code for the best individuals for the experiment.This python code contains a CustomIndividual class that is a recipe that can be edited and customized. 
The CustomIndividual class itself can also be customized for expert use.By default, 'auto' means on.At the end of an experiment, the summary zip contains auto-generated python code for the individuals used in the experiment, including the last best population (best_population_indivXX.py where XX iterates the population), last best individual (best_individual.py), final base models (final_indivYY.py where YY iterates the final base models).The summary zip also contains an example_indiv.py file that generates other transformers that may be useful that did not happen to be used in the experiment.In addition, the GUI and python client allow one to generate custom individuals from an aborted or finished experiment.For finished experiments, this will provide a zip file containing the final_indivYY.py files, and for aborted experiments this will contain the best population and best individual files.See included_individuals for more details." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting make_python_code", + "output": "make python code config.toml: Whether to generate python code for the best individuals for the experiment.This python code contains a CustomIndividual class that is a recipe that can be edited and customized. 
The CustomIndividual class itself can also be customized for expert use.By default, 'auto' means on.At the end of an experiment, the summary zip contains auto-generated python code for the individuals used in the experiment, including the last best population (best_population_indivXX.py where XX iterates the population), last best individual (best_individual.py), final base models (final_indivYY.py where YY iterates the final base models).The summary zip also contains an example_indiv.py file that generates other transformers that may be useful that did not happen to be used in the experiment.In addition, the GUI and python client allow one to generate custom individuals from an aborted or finished experiment.For finished experiments, this will provide a zip file containing the final_indivYY.py files, and for aborted experiments this will contain the best population and best individual files.See included_individuals for more details." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting make_python_code", + "output": "make python code config.toml: Generate python code for individual: Whether to generate python code for the best individuals for the experiment.This python code contains a CustomIndividual class that is a recipe that can be edited and customized. 
The CustomIndividual class itself can also be customized for expert use.By default, 'auto' means on.At the end of an experiment, the summary zip contains auto-generated python code for the individuals used in the experiment, including the last best population (best_population_indivXX.py where XX iterates the population), last best individual (best_individual.py), final base models (final_indivYY.py where YY iterates the final base models).The summary zip also contains an example_indiv.py file that generates other transformers that may be useful that did not happen to be used in the experiment.In addition, the GUI and python client allow one to generate custom individuals from an aborted or finished experiment.For finished experiments, this will provide a zip file containing the final_indivYY.py files, and for aborted experiments this will contain the best population and best individual files.See included_individuals for more details." + }, + { + "prompt_type": "plain", + "instruction": ": What does make_json_code do? : make json code config.toml: Whether to generate json code for the best individuals for the experiment. This python code contains the essential attributes from the internal DAI individual class. Reading the json code as a recipe is not supported. By default, 'auto' means off. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain make_json_code. : make json code config.toml: Whether to generate json code for the best individuals for the experiment. This python code contains the essential attributes from the internal DAI individual class. Reading the json code as a recipe is not supported. By default, 'auto' means off. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Generate json code for individual: . 
: Set the make json code config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_json_code", + "output": "make json code config.toml: Whether to generate json code for the best individuals for the experiment. This python code contains the essential attributes from the internal DAI individual class. Reading the json code as a recipe is not supported. By default, 'auto' means off. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_json_code", + "output": "make json code config.toml: Generate json code for individual: Whether to generate json code for the best individuals for the experiment. This python code contains the essential attributes from the internal DAI individual class. Reading the json code as a recipe is not supported. By default, 'auto' means off. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make json code", + "output": "make json code config.toml: Generate json code for individual: Whether to generate json code for the best individuals for the experiment. This python code contains the essential attributes from the internal DAI individual class. Reading the json code as a recipe is not supported. By default, 'auto' means off. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Generate json code for individual: ", + "output": "make json code config.toml: Generate json code for individual: Whether to generate json code for the best individuals for the experiment. This python code contains the essential attributes from the internal DAI individual class. Reading the json code as a recipe is not supported. By default, 'auto' means off. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting make_json_code", + "output": "make json code config.toml: Whether to generate json code for the best individuals for the experiment. This python code contains the essential attributes from the internal DAI individual class. Reading the json code as a recipe is not supported. By default, 'auto' means off. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting make_json_code", + "output": "make json code config.toml: Generate json code for individual: Whether to generate json code for the best individuals for the experiment. This python code contains the essential attributes from the internal DAI individual class. Reading the json code as a recipe is not supported. By default, 'auto' means off. " + }, + { + "prompt_type": "plain", + "instruction": ": What does python_code_ngenes_max do? : python code ngenes max config.toml: Maximum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain python_code_ngenes_max. : python code ngenes max config.toml: Maximum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. Num. genes for example auto-generated individual: . : Set the python code ngenes max config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python_code_ngenes_max", + "output": "python code ngenes max config.toml: Maximum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python_code_ngenes_max", + "output": "python code ngenes max config.toml: Max. Num. genes for example auto-generated individual: Maximum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python code ngenes max", + "output": "python code ngenes max config.toml: Max. Num. genes for example auto-generated individual: Maximum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. Num. genes for example auto-generated individual: ", + "output": "python code ngenes max config.toml: Max. Num. genes for example auto-generated individual: Maximum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting python_code_ngenes_max", + "output": "python code ngenes max config.toml: Maximum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting python_code_ngenes_max", + "output": "python code ngenes max config.toml: Max. Num. genes for example auto-generated individual: Maximum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "plain", + "instruction": ": What does python_code_ngenes_min do? 
: python code ngenes min config.toml: Minimum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain python_code_ngenes_min. : python code ngenes min config.toml: Minimum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Min. Num. genes for example auto-generated individual: . : Set the python code ngenes min config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python_code_ngenes_min", + "output": "python code ngenes min config.toml: Minimum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python_code_ngenes_min", + "output": "python code ngenes min config.toml: Min. Num. genes for example auto-generated individual: Minimum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python code ngenes min", + "output": "python code ngenes min config.toml: Min. Num. genes for example auto-generated individual: Minimum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Min. Num. genes for example auto-generated individual: ", + "output": "python code ngenes min config.toml: Min. Num. 
genes for example auto-generated individual: Minimum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting python_code_ngenes_min", + "output": "python code ngenes min config.toml: Minimum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting python_code_ngenes_min", + "output": "python code ngenes min config.toml: Min. Num. genes for example auto-generated individual: Minimum number of genes to make for example auto-generated custom individual, called example_indiv.py in the summary zip file. " + }, + { + "prompt_type": "plain", + "instruction": ": What does threshold_scorer do? : threshold scorer config.toml: Select the scorer to optimize the binary probability threshold that is being used in related Confusion Matrix based scorers that are trivial to optimize otherwise: Precision, Recall, FalsePositiveRate, FalseDiscoveryRate, FalseOmissionRate, TrueNegativeRate, FalseNegativeRate, NegativePredictiveValue. Use F1 if the target class matters more, and MCC if all classes are equally important. AUTO will try to sync the threshold scorer with the scorer used for the experiment, otherwise falls back to F1. The optimized threshold is also used for creating labels in addition to probabilities in MOJO/Python scorers." + }, + { + "prompt_type": "plain", + "instruction": ": Explain threshold_scorer. : threshold scorer config.toml: Select the scorer to optimize the binary probability threshold that is being used in related Confusion Matrix based scorers that are trivial to optimize otherwise: Precision, Recall, FalsePositiveRate, FalseDiscoveryRate, FalseOmissionRate, TrueNegativeRate, FalseNegativeRate, NegativePredictiveValue. 
Use F1 if the target class matters more, and MCC if all classes are equally important. AUTO will try to sync the threshold scorer with the scorer used for the experiment, otherwise falls back to F1. The optimized threshold is also used for creating labels in addition to probabilities in MOJO/Python scorers." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: For binary classification only: Scorer to optimize threshold to be used in confusion-matrix based scorers that are trivial to optimize and for label creation in MOJO/Python scorers.: . : Set the threshold scorer config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "threshold_scorer", + "output": "threshold scorer config.toml: Select the scorer to optimize the binary probability threshold that is being used in related Confusion Matrix based scorers that are trivial to optimize otherwise: Precision, Recall, FalsePositiveRate, FalseDiscoveryRate, FalseOmissionRate, TrueNegativeRate, FalseNegativeRate, NegativePredictiveValue. Use F1 if the target class matters more, and MCC if all classes are equally important. AUTO will try to sync the threshold scorer with the scorer used for the experiment, otherwise falls back to F1. The optimized threshold is also used for creating labels in addition to probabilities in MOJO/Python scorers." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "threshold_scorer", + "output": "threshold scorer config.toml: For binary classification only: Scorer to optimize threshold to be used in confusion-matrix based scorers that are trivial to optimize and for label creation in MOJO/Python scorers.: Select the scorer to optimize the binary probability threshold that is being used in related Confusion Matrix based scorers that are trivial to optimize otherwise: Precision, Recall, FalsePositiveRate, FalseDiscoveryRate, FalseOmissionRate, TrueNegativeRate, FalseNegativeRate, NegativePredictiveValue. Use F1 if the target class matters more, and MCC if all classes are equally important. AUTO will try to sync the threshold scorer with the scorer used for the experiment, otherwise falls back to F1. The optimized threshold is also used for creating labels in addition to probabilities in MOJO/Python scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "threshold scorer", + "output": "threshold scorer config.toml: For binary classification only: Scorer to optimize threshold to be used in confusion-matrix based scorers that are trivial to optimize and for label creation in MOJO/Python scorers.: Select the scorer to optimize the binary probability threshold that is being used in related Confusion Matrix based scorers that are trivial to optimize otherwise: Precision, Recall, FalsePositiveRate, FalseDiscoveryRate, FalseOmissionRate, TrueNegativeRate, FalseNegativeRate, NegativePredictiveValue. Use F1 if the target class matters more, and MCC if all classes are equally important. AUTO will try to sync the threshold scorer with the scorer used for the experiment, otherwise falls back to F1. The optimized threshold is also used for creating labels in addition to probabilities in MOJO/Python scorers." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "For binary classification only: Scorer to optimize threshold to be used in confusion-matrix based scorers that are trivial to optimize and for label creation in MOJO/Python scorers.: ", + "output": "threshold scorer config.toml: For binary classification only: Scorer to optimize threshold to be used in confusion-matrix based scorers that are trivial to optimize and for label creation in MOJO/Python scorers.: Select the scorer to optimize the binary probability threshold that is being used in related Confusion Matrix based scorers that are trivial to optimize otherwise: Precision, Recall, FalsePositiveRate, FalseDiscoveryRate, FalseOmissionRate, TrueNegativeRate, FalseNegativeRate, NegativePredictiveValue. Use F1 if the target class matters more, and MCC if all classes are equally important. AUTO will try to sync the threshold scorer with the scorer used for the experiment, otherwise falls back to F1. The optimized threshold is also used for creating labels in addition to probabilities in MOJO/Python scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting threshold_scorer", + "output": "threshold scorer config.toml: Select the scorer to optimize the binary probability threshold that is being used in related Confusion Matrix based scorers that are trivial to optimize otherwise: Precision, Recall, FalsePositiveRate, FalseDiscoveryRate, FalseOmissionRate, TrueNegativeRate, FalseNegativeRate, NegativePredictiveValue. Use F1 if the target class matters more, and MCC if all classes are equally important. AUTO will try to sync the threshold scorer with the scorer used for the experiment, otherwise falls back to F1. The optimized threshold is also used for creating labels in addition to probabilities in MOJO/Python scorers." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting threshold_scorer", + "output": "threshold scorer config.toml: For binary classification only: Scorer to optimize threshold to be used in confusion-matrix based scorers that are trivial to optimize and for label creation in MOJO/Python scorers.: Select the scorer to optimize the binary probability threshold that is being used in related Confusion Matrix based scorers that are trivial to optimize otherwise: Precision, Recall, FalsePositiveRate, FalseDiscoveryRate, FalseOmissionRate, TrueNegativeRate, FalseNegativeRate, NegativePredictiveValue. Use F1 if the target class matters more, and MCC if all classes are equally important. AUTO will try to sync the threshold scorer with the scorer used for the experiment, otherwise falls back to F1. The optimized threshold is also used for creating labels in addition to probabilities in MOJO/Python scorers." + }, + { + "prompt_type": "plain", + "instruction": ": What does excluded_scorers do? : excluded scorers config.toml: Auxiliary to included_scorers" + }, + { + "prompt_type": "plain", + "instruction": ": Explain excluded_scorers. : excluded scorers config.toml: Auxiliary to included_scorers" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Exclude specific scorers: . 
: Set the excluded scorers config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_scorers", + "output": "excluded scorers config.toml: Auxiliary to included_scorers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_scorers", + "output": "excluded scorers config.toml: Exclude specific scorers: Auxiliary to included_scorers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded scorers", + "output": "excluded scorers config.toml: Exclude specific scorers: Auxiliary to included_scorers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Exclude specific scorers: ", + "output": "excluded scorers config.toml: Exclude specific scorers: Auxiliary to included_scorers" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting excluded_scorers", + "output": "excluded scorers config.toml: Auxiliary to included_scorers" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting excluded_scorers", + "output": "excluded scorers config.toml: Exclude specific scorers: Auxiliary to included_scorers" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_constant_model do? : enable constant model config.toml: Whether to enable constant models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_constant_model. : enable constant model config.toml: Whether to enable constant models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Constant models: . 
: Set the enable constant model config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_constant_model", + "output": "enable constant model config.toml: Whether to enable constant models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_constant_model", + "output": "enable constant model config.toml: Constant models: Whether to enable constant models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable constant model", + "output": "enable constant model config.toml: Constant models: Whether to enable constant models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Constant models: ", + "output": "enable constant model config.toml: Constant models: Whether to enable constant models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_constant_model", + "output": "enable constant model config.toml: Whether to enable constant models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_constant_model", + "output": "enable constant model config.toml: Constant models: Whether to enable constant models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_decision_tree do? : enable decision tree config.toml: Whether to enable Decision Tree models ('auto'/'on'/'off'). 'auto' disables decision tree unless only non-constant model chosen." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_decision_tree. 
: enable decision tree config.toml: Whether to enable Decision Tree models ('auto'/'on'/'off'). 'auto' disables decision tree unless only non-constant model chosen." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Decision Tree models: . : Set the enable decision tree config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_decision_tree", + "output": "enable decision tree config.toml: Whether to enable Decision Tree models ('auto'/'on'/'off'). 'auto' disables decision tree unless only non-constant model chosen." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_decision_tree", + "output": "enable decision tree config.toml: Decision Tree models: Whether to enable Decision Tree models ('auto'/'on'/'off'). 'auto' disables decision tree unless only non-constant model chosen." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable decision tree", + "output": "enable decision tree config.toml: Decision Tree models: Whether to enable Decision Tree models ('auto'/'on'/'off'). 'auto' disables decision tree unless only non-constant model chosen." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Decision Tree models: ", + "output": "enable decision tree config.toml: Decision Tree models: Whether to enable Decision Tree models ('auto'/'on'/'off'). 'auto' disables decision tree unless only non-constant model chosen." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_decision_tree", + "output": "enable decision tree config.toml: Whether to enable Decision Tree models ('auto'/'on'/'off'). 'auto' disables decision tree unless only non-constant model chosen." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_decision_tree", + "output": "enable decision tree config.toml: Decision Tree models: Whether to enable Decision Tree models ('auto'/'on'/'off'). 'auto' disables decision tree unless only non-constant model chosen." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_glm do? : enable glm config.toml: Whether to enable GLM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_glm. : enable glm config.toml: Whether to enable GLM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: GLM models: . : Set the enable glm config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_glm", + "output": "enable glm config.toml: Whether to enable GLM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_glm", + "output": "enable glm config.toml: GLM models: Whether to enable GLM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable glm", + "output": "enable glm config.toml: GLM models: Whether to enable GLM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "GLM models: ", + "output": "enable glm config.toml: GLM models: Whether to enable GLM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_glm", + "output": "enable glm config.toml: Whether to enable GLM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation 
of the expert setting enable_glm", + "output": "enable glm config.toml: GLM models: Whether to enable GLM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_glm_rapids do? : enable glm rapids config.toml: Whether to enable RAPIDS extensions to GLM models (not available until fixes are in xgboost 1.3.0)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_glm_rapids. : enable glm rapids config.toml: Whether to enable RAPIDS extensions to GLM models (not available until fixes are in xgboost 1.3.0)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable RAPIDS-cudf extensions to GLM: . : Set the enable glm rapids config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_glm_rapids", + "output": "enable glm rapids config.toml: Whether to enable RAPIDS extensions to GLM models (not available until fixes are in xgboost 1.3.0)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_glm_rapids", + "output": "enable glm rapids config.toml: Enable RAPIDS-cudf extensions to GLM: Whether to enable RAPIDS extensions to GLM models (not available until fixes are in xgboost 1.3.0)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable glm rapids", + "output": "enable glm rapids config.toml: Enable RAPIDS-cudf extensions to GLM: Whether to enable RAPIDS extensions to GLM models (not available until fixes are in xgboost 1.3.0)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable RAPIDS-cudf extensions to GLM: ", + "output": "enable glm rapids config.toml: Enable RAPIDS-cudf extensions to GLM: Whether to enable RAPIDS extensions to GLM models (not available until fixes are 
in xgboost 1.3.0)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_glm_rapids", + "output": "enable glm rapids config.toml: Whether to enable RAPIDS extensions to GLM models (not available until fixes are in xgboost 1.3.0)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_glm_rapids", + "output": "enable glm rapids config.toml: Enable RAPIDS-cudf extensions to GLM: Whether to enable RAPIDS extensions to GLM models (not available until fixes are in xgboost 1.3.0)" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_xgboost_gbm do? : enable xgboost gbm config.toml: Whether to enable XGBoost GBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_xgboost_gbm. : enable xgboost gbm config.toml: Whether to enable XGBoost GBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: XGBoost GBM models: . 
: Set the enable xgboost gbm config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_gbm", + "output": "enable xgboost gbm config.toml: Whether to enable XGBoost GBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_gbm", + "output": "enable xgboost gbm config.toml: XGBoost GBM models: Whether to enable XGBoost GBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable xgboost gbm", + "output": "enable xgboost gbm config.toml: XGBoost GBM models: Whether to enable XGBoost GBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "XGBoost GBM models: ", + "output": "enable xgboost gbm config.toml: XGBoost GBM models: Whether to enable XGBoost GBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_xgboost_gbm", + "output": "enable xgboost gbm config.toml: Whether to enable XGBoost GBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_xgboost_gbm", + "output": "enable xgboost gbm config.toml: XGBoost GBM models: Whether to enable XGBoost GBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_lightgbm do? : enable lightgbm config.toml: Whether to enable LightGBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_lightgbm. 
: enable lightgbm config.toml: Whether to enable LightGBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: LightGBM models: . : Set the enable lightgbm config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm", + "output": "enable lightgbm config.toml: Whether to enable LightGBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm", + "output": "enable lightgbm config.toml: LightGBM models: Whether to enable LightGBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable lightgbm", + "output": "enable lightgbm config.toml: LightGBM models: Whether to enable LightGBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "LightGBM models: ", + "output": "enable lightgbm config.toml: LightGBM models: Whether to enable LightGBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_lightgbm", + "output": "enable lightgbm config.toml: Whether to enable LightGBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_lightgbm", + "output": "enable lightgbm config.toml: LightGBM models: Whether to enable LightGBM models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_tensorflow do? : enable tensorflow config.toml: Whether to enable TensorFlow models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_tensorflow. 
: enable tensorflow config.toml: Whether to enable TensorFlow models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: TensorFlow models: . : Set the enable tensorflow config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_tensorflow", + "output": "enable tensorflow config.toml: Whether to enable TensorFlow models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_tensorflow", + "output": "enable tensorflow config.toml: TensorFlow models: Whether to enable TensorFlow models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable tensorflow", + "output": "enable tensorflow config.toml: TensorFlow models: Whether to enable TensorFlow models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "TensorFlow models: ", + "output": "enable tensorflow config.toml: TensorFlow models: Whether to enable TensorFlow models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_tensorflow", + "output": "enable tensorflow config.toml: Whether to enable TensorFlow models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_tensorflow", + "output": "enable tensorflow config.toml: TensorFlow models: Whether to enable TensorFlow models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_grownet do? 
: enable grownet config.toml: Whether to enable PyTorch-based GrowNet models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_grownet. : enable grownet config.toml: Whether to enable PyTorch-based GrowNet models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: PyTorch GrowNet models: . : Set the enable grownet config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_grownet", + "output": "enable grownet config.toml: Whether to enable PyTorch-based GrowNet models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_grownet", + "output": "enable grownet config.toml: PyTorch GrowNet models: Whether to enable PyTorch-based GrowNet models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable grownet", + "output": "enable grownet config.toml: PyTorch GrowNet models: Whether to enable PyTorch-based GrowNet models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "PyTorch GrowNet models: ", + "output": "enable grownet config.toml: PyTorch GrowNet models: Whether to enable PyTorch-based GrowNet models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_grownet", + "output": "enable grownet config.toml: Whether to enable PyTorch-based GrowNet models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_grownet", + "output": "enable grownet config.toml: PyTorch GrowNet models: Whether to enable PyTorch-based GrowNet models ('auto'/'on'/'off')" + 
}, + { + "prompt_type": "plain", + "instruction": ": What does enable_ftrl do? : enable ftrl config.toml: Whether to enable FTRL support (follow the regularized leader) model ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_ftrl. : enable ftrl config.toml: Whether to enable FTRL support (follow the regularized leader) model ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: FTRL models: . : Set the enable ftrl config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_ftrl", + "output": "enable ftrl config.toml: Whether to enable FTRL support (follow the regularized leader) model ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_ftrl", + "output": "enable ftrl config.toml: FTRL models: Whether to enable FTRL support (follow the regularized leader) model ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable ftrl", + "output": "enable ftrl config.toml: FTRL models: Whether to enable FTRL support (follow the regularized leader) model ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "FTRL models: ", + "output": "enable ftrl config.toml: FTRL models: Whether to enable FTRL support (follow the regularized leader) model ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_ftrl", + "output": "enable ftrl config.toml: Whether to enable FTRL support (follow the regularized leader) model ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_ftrl", 
+ "output": "enable ftrl config.toml: FTRL models: Whether to enable FTRL support (follow the regularized leader) model ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_rulefit do? : enable rulefit config.toml: Whether to enable RuleFit support (beta version, no mojo) ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_rulefit. : enable rulefit config.toml: Whether to enable RuleFit support (beta version, no mojo) ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: RuleFit models: . : Set the enable rulefit config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_rulefit", + "output": "enable rulefit config.toml: Whether to enable RuleFit support (beta version, no mojo) ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_rulefit", + "output": "enable rulefit config.toml: RuleFit models: Whether to enable RuleFit support (beta version, no mojo) ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable rulefit", + "output": "enable rulefit config.toml: RuleFit models: Whether to enable RuleFit support (beta version, no mojo) ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "RuleFit models: ", + "output": "enable rulefit config.toml: RuleFit models: Whether to enable RuleFit support (beta version, no mojo) ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_rulefit", + "output": "enable rulefit config.toml: Whether to enable RuleFit support (beta version, no mojo) ('auto'/'on'/'off')" + 
}, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_rulefit", + "output": "enable rulefit config.toml: RuleFit models: Whether to enable RuleFit support (beta version, no mojo) ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_zero_inflated_models do? : enable zero inflated models config.toml: Whether to enable automatic addition of zero-inflated models for regression problems with zero-inflated target values that meet certain conditions: y >= 0, y.std() > y.mean()" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_zero_inflated_models. : enable zero inflated models config.toml: Whether to enable automatic addition of zero-inflated models for regression problems with zero-inflated target values that meet certain conditions: y >= 0, y.std() > y.mean()" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Zero-Inflated models: . : Set the enable zero inflated models config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_zero_inflated_models", + "output": "enable zero inflated models config.toml: Whether to enable automatic addition of zero-inflated models for regression problems with zero-inflated target values that meet certain conditions: y >= 0, y.std() > y.mean()" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_zero_inflated_models", + "output": "enable zero inflated models config.toml: Zero-Inflated models: Whether to enable automatic addition of zero-inflated models for regression problems with zero-inflated target values that meet certain conditions: y >= 0, y.std() > y.mean()" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable zero inflated models", + "output": 
"enable zero inflated models config.toml: Zero-Inflated models: Whether to enable automatic addition of zero-inflated models for regression problems with zero-inflated target values that meet certain conditions: y >= 0, y.std() > y.mean()" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Zero-Inflated models: ", + "output": "enable zero inflated models config.toml: Zero-Inflated models: Whether to enable automatic addition of zero-inflated models for regression problems with zero-inflated target values that meet certain conditions: y >= 0, y.std() > y.mean()" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_zero_inflated_models", + "output": "enable zero inflated models config.toml: Whether to enable automatic addition of zero-inflated models for regression problems with zero-inflated target values that meet certain conditions: y >= 0, y.std() > y.mean()" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_zero_inflated_models", + "output": "enable zero inflated models config.toml: Zero-Inflated models: Whether to enable automatic addition of zero-inflated models for regression problems with zero-inflated target values that meet certain conditions: y >= 0, y.std() > y.mean()" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_xgboost_rapids do? : enable xgboost rapids config.toml: Whether to enable RAPIDS extensions to XGBoost GBM/Dart. If selected, python scoring package can only be used on GPU system." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_xgboost_rapids. : enable xgboost rapids config.toml: Whether to enable RAPIDS extensions to XGBoost GBM/Dart. If selected, python scoring package can only be used on GPU system." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable RAPIDS-cudf extensions to XGBoost GBM/Dart: . : Set the enable xgboost rapids config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_rapids", + "output": "enable xgboost rapids config.toml: Whether to enable RAPIDS extensions to XGBoost GBM/Dart. If selected, python scoring package can only be used on GPU system." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_rapids", + "output": "enable xgboost rapids config.toml: Enable RAPIDS-cudf extensions to XGBoost GBM/Dart: Whether to enable RAPIDS extensions to XGBoost GBM/Dart. If selected, python scoring package can only be used on GPU system." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable xgboost rapids", + "output": "enable xgboost rapids config.toml: Enable RAPIDS-cudf extensions to XGBoost GBM/Dart: Whether to enable RAPIDS extensions to XGBoost GBM/Dart. If selected, python scoring package can only be used on GPU system." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable RAPIDS-cudf extensions to XGBoost GBM/Dart: ", + "output": "enable xgboost rapids config.toml: Enable RAPIDS-cudf extensions to XGBoost GBM/Dart: Whether to enable RAPIDS extensions to XGBoost GBM/Dart. If selected, python scoring package can only be used on GPU system." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_xgboost_rapids", + "output": "enable xgboost rapids config.toml: Whether to enable RAPIDS extensions to XGBoost GBM/Dart. If selected, python scoring package can only be used on GPU system." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_xgboost_rapids", + "output": "enable xgboost rapids config.toml: Enable RAPIDS-cudf extensions to XGBoost GBM/Dart: Whether to enable RAPIDS extensions to XGBoost GBM/Dart. If selected, python scoring package can only be used on GPU system." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_rapids_cuml_models do? : enable rapids cuml models config.toml: Whether to enable GPU-based RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_rapids_cuml_models. : enable rapids cuml models config.toml: Whether to enable GPU-based RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to enable RAPIDS CUML GPU models (no mojo): . : Set the enable rapids cuml models config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_rapids_cuml_models", + "output": "enable rapids cuml models config.toml: Whether to enable GPU-based RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_rapids_cuml_models", + "output": "enable rapids cuml models config.toml: Whether to enable RAPIDS CUML GPU models (no mojo): Whether to enable GPU-based RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable rapids cuml models", + "output": "enable rapids cuml models config.toml: Whether to enable RAPIDS CUML GPU models (no mojo): Whether to enable GPU-based RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to enable RAPIDS CUML GPU models (no mojo): ", + "output": "enable rapids cuml models config.toml: Whether to enable RAPIDS CUML GPU models (no mojo): Whether to enable GPU-based RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_rapids_cuml_models", + "output": "enable rapids cuml models config.toml: Whether to enable GPU-based RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_rapids_cuml_models", + "output": "enable rapids cuml models config.toml: Whether to enable RAPIDS CUML GPU models (no mojo): Whether to enable GPU-based RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_rapids_models_dask do? : enable rapids models dask config.toml: Whether to enable Multi-GPU mode for capable RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_rapids_models_dask. 
: enable rapids models dask config.toml: Whether to enable Multi-GPU mode for capable RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to enable RAPIDS CUML GPU models to use Dask (no mojo): . : Set the enable rapids models dask config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_rapids_models_dask", + "output": "enable rapids models dask config.toml: Whether to enable Multi-GPU mode for capable RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_rapids_models_dask", + "output": "enable rapids models dask config.toml: Whether to enable RAPIDS CUML GPU models to use Dask (no mojo): Whether to enable Multi-GPU mode for capable RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable rapids models dask", + "output": "enable rapids models dask config.toml: Whether to enable RAPIDS CUML GPU models to use Dask (no mojo): Whether to enable Multi-GPU mode for capable RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to enable RAPIDS CUML GPU models to use Dask (no mojo): ", + "output": "enable rapids models dask config.toml: Whether to enable RAPIDS CUML GPU models to use Dask (no mojo): Whether to enable Multi-GPU mode for capable RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_rapids_models_dask", + "output": "enable rapids models dask config.toml: Whether to enable Multi-GPU mode for capable RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_rapids_models_dask", + "output": "enable rapids models dask config.toml: Whether to enable RAPIDS CUML GPU models to use Dask (no mojo): Whether to enable Multi-GPU mode for capable RAPIDS CUML models.No mojo support, but python scoring is supported.In alpha testing status. " + }, + { + "prompt_type": "plain", + "instruction": ": What does use_dask_for_1_gpu do? : use dask for 1 gpu config.toml: Whether to use dask_cudf even for 1 GPU. If False, will use plain cudf." + }, + { + "prompt_type": "plain", + "instruction": ": Explain use_dask_for_1_gpu. : use dask for 1 gpu config.toml: Whether to use dask_cudf even for 1 GPU. If False, will use plain cudf." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_dask_for_1_gpu", + "output": "use dask for 1 gpu config.toml: Whether to use dask_cudf even for 1 GPU. If False, will use plain cudf." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_dask_for_1_gpu", + "output": "use dask for 1 gpu config.toml: Whether to use dask_cudf even for 1 GPU. If False, will use plain cudf." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use dask for 1 gpu", + "output": "use dask for 1 gpu config.toml: Whether to use dask_cudf even for 1 GPU. If False, will use plain cudf." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "use dask for 1 gpu config.toml: Whether to use dask_cudf even for 1 GPU. If False, will use plain cudf." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting use_dask_for_1_gpu", + "output": "use dask for 1 gpu config.toml: Whether to use dask_cudf even for 1 GPU. If False, will use plain cudf." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting use_dask_for_1_gpu", + "output": "use dask for 1 gpu config.toml: Whether to use dask_cudf even for 1 GPU. If False, will use plain cudf." + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_retrials_allreduce_empty_issue do? : dask retrials allreduce empty issue config.toml: Number of retrials for dask fit to protect against known xgboost issues https://github.com/dmlc/xgboost/issues/6272 https://github.com/dmlc/xgboost/issues/6551" + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_retrials_allreduce_empty_issue. 
: dask retrials allreduce empty issue config.toml: Number of retrials for dask fit to protect against known xgboost issues https://github.com/dmlc/xgboost/issues/6272 https://github.com/dmlc/xgboost/issues/6551" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_retrials_allreduce_empty_issue", + "output": "dask retrials allreduce empty issue config.toml: Number of retrials for dask fit to protect against known xgboost issues https://github.com/dmlc/xgboost/issues/6272 https://github.com/dmlc/xgboost/issues/6551" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_retrials_allreduce_empty_issue", + "output": "dask retrials allreduce empty issue config.toml: Number of retrials for dask fit to protect against known xgboost issues https://github.com/dmlc/xgboost/issues/6272 https://github.com/dmlc/xgboost/issues/6551" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask retrials allreduce empty issue", + "output": "dask retrials allreduce empty issue config.toml: Number of retrials for dask fit to protect against known xgboost issues https://github.com/dmlc/xgboost/issues/6272 https://github.com/dmlc/xgboost/issues/6551" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "dask retrials allreduce empty issue config.toml: Number of retrials for dask fit to protect against known xgboost issues https://github.com/dmlc/xgboost/issues/6272 https://github.com/dmlc/xgboost/issues/6551" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_retrials_allreduce_empty_issue", + "output": "dask retrials allreduce empty issue config.toml: Number of retrials for dask fit to protect against known xgboost 
issues https://github.com/dmlc/xgboost/issues/6272 https://github.com/dmlc/xgboost/issues/6551" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_retrials_allreduce_empty_issue", + "output": "dask retrials allreduce empty issue config.toml: Number of retrials for dask fit to protect against known xgboost issues https://github.com/dmlc/xgboost/issues/6272 https://github.com/dmlc/xgboost/issues/6551" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_xgboost_rf do? : enable xgboost rf config.toml: Whether to enable XGBoost RF mode without early stopping. Disabled unless switched on. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_xgboost_rf. : enable xgboost rf config.toml: Whether to enable XGBoost RF mode without early stopping. Disabled unless switched on. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable XGBoost RF mode: . : Set the enable xgboost rf config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_rf", + "output": "enable xgboost rf config.toml: Whether to enable XGBoost RF mode without early stopping. Disabled unless switched on. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_rf", + "output": "enable xgboost rf config.toml: Enable XGBoost RF mode: Whether to enable XGBoost RF mode without early stopping. Disabled unless switched on. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable xgboost rf", + "output": "enable xgboost rf config.toml: Enable XGBoost RF mode: Whether to enable XGBoost RF mode without early stopping. Disabled unless switched on. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable XGBoost RF mode: ", + "output": "enable xgboost rf config.toml: Enable XGBoost RF mode: Whether to enable XGBoost RF mode without early stopping. Disabled unless switched on. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_xgboost_rf", + "output": "enable xgboost rf config.toml: Whether to enable XGBoost RF mode without early stopping. Disabled unless switched on. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_xgboost_rf", + "output": "enable xgboost rf config.toml: Enable XGBoost RF mode: Whether to enable XGBoost RF mode without early stopping. Disabled unless switched on. " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_xgboost_gbm_dask do? : enable xgboost gbm dask config.toml: Whether to enable dask_cudf (multi-GPU) version of XGBoost GBM/RF. Disabled unless switched on. Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_xgboost_gbm_dask. : enable xgboost gbm dask config.toml: Whether to enable dask_cudf (multi-GPU) version of XGBoost GBM/RF. Disabled unless switched on. Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable dask_cudf (multi-GPU) XGBoost GBM/RF: . : Set the enable xgboost gbm dask config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_gbm_dask", + "output": "enable xgboost gbm dask config.toml: Whether to enable dask_cudf (multi-GPU) version of XGBoost GBM/RF. Disabled unless switched on. 
Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_gbm_dask", + "output": "enable xgboost gbm dask config.toml: Enable dask_cudf (multi-GPU) XGBoost GBM/RF: Whether to enable dask_cudf (multi-GPU) version of XGBoost GBM/RF. Disabled unless switched on. Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable xgboost gbm dask", + "output": "enable xgboost gbm dask config.toml: Enable dask_cudf (multi-GPU) XGBoost GBM/RF: Whether to enable dask_cudf (multi-GPU) version of XGBoost GBM/RF. Disabled unless switched on. Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable dask_cudf (multi-GPU) XGBoost GBM/RF: ", + "output": "enable xgboost gbm dask config.toml: Enable dask_cudf (multi-GPU) XGBoost GBM/RF: Whether to enable dask_cudf (multi-GPU) version of XGBoost GBM/RF. Disabled unless switched on. Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_xgboost_gbm_dask", + "output": "enable xgboost gbm dask config.toml: Whether to enable dask_cudf (multi-GPU) version of XGBoost GBM/RF. Disabled unless switched on. Only applicable for single final model without early stopping. No Shapley possible. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_xgboost_gbm_dask", + "output": "enable xgboost gbm dask config.toml: Enable dask_cudf (multi-GPU) XGBoost GBM/RF: Whether to enable dask_cudf (multi-GPU) version of XGBoost GBM/RF. Disabled unless switched on. Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_lightgbm_dask do? : enable lightgbm dask config.toml: Whether to enable multi-node LightGBM. Disabled unless switched on. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_lightgbm_dask. : enable lightgbm dask config.toml: Whether to enable multi-node LightGBM. Disabled unless switched on. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable dask (multi-node) LightGBM: . : Set the enable lightgbm dask config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_dask", + "output": "enable lightgbm dask config.toml: Whether to enable multi-node LightGBM. Disabled unless switched on. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_dask", + "output": "enable lightgbm dask config.toml: Enable dask (multi-node) LightGBM: Whether to enable multi-node LightGBM. Disabled unless switched on. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable lightgbm dask", + "output": "enable lightgbm dask config.toml: Enable dask (multi-node) LightGBM: Whether to enable multi-node LightGBM. Disabled unless switched on. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable dask (multi-node) LightGBM: ", + "output": "enable lightgbm dask config.toml: Enable dask (multi-node) LightGBM: Whether to enable multi-node LightGBM. Disabled unless switched on. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_lightgbm_dask", + "output": "enable lightgbm dask config.toml: Whether to enable multi-node LightGBM. Disabled unless switched on. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_lightgbm_dask", + "output": "enable lightgbm dask config.toml: Enable dask (multi-node) LightGBM: Whether to enable multi-node LightGBM. Disabled unless switched on. " + }, + { + "prompt_type": "plain", + "instruction": ": What does hyperopt_shift_leak do? : hyperopt shift leak config.toml: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection. Might be useful to find non-trivial leakage/shift, but usually not necessary. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain hyperopt_shift_leak. : hyperopt shift leak config.toml: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection. Might be useful to find non-trivial leakage/shift, but usually not necessary. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to do hyperopt for leakage/shift: . : Set the hyperopt shift leak config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hyperopt_shift_leak", + "output": "hyperopt shift leak config.toml: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection. 
Might be useful to find non-trivial leakage/shift, but usually not necessary. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hyperopt_shift_leak", + "output": "hyperopt shift leak config.toml: Whether to do hyperopt for leakage/shift: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection. Might be useful to find non-trivial leakage/shift, but usually not necessary. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hyperopt shift leak", + "output": "hyperopt shift leak config.toml: Whether to do hyperopt for leakage/shift: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection. Might be useful to find non-trivial leakage/shift, but usually not necessary. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to do hyperopt for leakage/shift: ", + "output": "hyperopt shift leak config.toml: Whether to do hyperopt for leakage/shift: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection. Might be useful to find non-trivial leakage/shift, but usually not necessary. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hyperopt_shift_leak", + "output": "hyperopt shift leak config.toml: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection. Might be useful to find non-trivial leakage/shift, but usually not necessary. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hyperopt_shift_leak", + "output": "hyperopt shift leak config.toml: Whether to do hyperopt for leakage/shift: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection. Might be useful to find non-trivial leakage/shift, but usually not necessary. " + }, + { + "prompt_type": "plain", + "instruction": ": What does hyperopt_shift_leak_per_column do? : hyperopt shift leak per column config.toml: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection, when checking each column. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain hyperopt_shift_leak_per_column. : hyperopt shift leak per column config.toml: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection, when checking each column. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to do hyperopt for leakage/shift for each column: . : Set the hyperopt shift leak per column config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hyperopt_shift_leak_per_column", + "output": "hyperopt shift leak per column config.toml: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection, when checking each column. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hyperopt_shift_leak_per_column", + "output": "hyperopt shift leak per column config.toml: Whether to do hyperopt for leakage/shift for each column: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection, when checking each column. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hyperopt shift leak per column", + "output": "hyperopt shift leak per column config.toml: Whether to do hyperopt for leakage/shift for each column: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection, when checking each column. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to do hyperopt for leakage/shift for each column: ", + "output": "hyperopt shift leak per column config.toml: Whether to do hyperopt for leakage/shift for each column: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection, when checking each column. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hyperopt_shift_leak_per_column", + "output": "hyperopt shift leak per column config.toml: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection, when checking each column. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hyperopt_shift_leak_per_column", + "output": "hyperopt shift leak per column config.toml: Whether to do hyperopt for leakage/shift for each column: If num_inner_hyperopt_trials_prefinal > 0, then whether to do hyper parameter tuning during leakage/shift detection, when checking each column. " + }, + { + "prompt_type": "plain", + "instruction": ": What does num_inner_hyperopt_trials_prefinal do? : num inner hyperopt trials prefinal config.toml: Number of trials for Optuna hyperparameter optimization for tuning and evolution models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. 
If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. However, can overfit on a single fold when doing tuning or evolution, and if using CV then averaging the fold hyperparameters can lead to unexpected results. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_inner_hyperopt_trials_prefinal. : num inner hyperopt trials prefinal config.toml: Number of trials for Optuna hyperparameter optimization for tuning and evolution models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. However, can overfit on a single fold when doing tuning or evolution, and if using CV then averaging the fold hyperparameters can lead to unexpected results. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of trials for hyperparameter optimization during model tuning only: . : Set the num inner hyperopt trials prefinal config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_inner_hyperopt_trials_prefinal", + "output": "num inner hyperopt trials prefinal config.toml: Number of trials for Optuna hyperparameter optimization for tuning and evolution models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. 
Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. However, can overfit on a single fold when doing tuning or evolution, and if using CV then averaging the fold hyperparameters can lead to unexpected results. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_inner_hyperopt_trials_prefinal", + "output": "num inner hyperopt trials prefinal config.toml: Number of trials for hyperparameter optimization during model tuning only: Number of trials for Optuna hyperparameter optimization for tuning and evolution models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. However, can overfit on a single fold when doing tuning or evolution, and if using CV then averaging the fold hyperparameters can lead to unexpected results. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num inner hyperopt trials prefinal", + "output": "num inner hyperopt trials prefinal config.toml: Number of trials for hyperparameter optimization during model tuning only: Number of trials for Optuna hyperparameter optimization for tuning and evolution models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. 
However, can overfit on a single fold when doing tuning or evolution, and if using CV then averaging the fold hyperparameters can lead to unexpected results. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of trials for hyperparameter optimization during model tuning only: ", + "output": "num inner hyperopt trials prefinal config.toml: Number of trials for hyperparameter optimization during model tuning only: Number of trials for Optuna hyperparameter optimization for tuning and evolution models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. However, can overfit on a single fold when doing tuning or evolution, and if using CV then averaging the fold hyperparameters can lead to unexpected results. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_inner_hyperopt_trials_prefinal", + "output": "num inner hyperopt trials prefinal config.toml: Number of trials for Optuna hyperparameter optimization for tuning and evolution models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. However, can overfit on a single fold when doing tuning or evolution, and if using CV then averaging the fold hyperparameters can lead to unexpected results. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_inner_hyperopt_trials_prefinal", + "output": "num inner hyperopt trials prefinal config.toml: Number of trials for hyperparameter optimization during model tuning only: Number of trials for Optuna hyperparameter optimization for tuning and evolution models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. However, can overfit on a single fold when doing tuning or evolution, and if using CV then averaging the fold hyperparameters can lead to unexpected results. " + }, + { + "prompt_type": "plain", + "instruction": ": What does num_inner_hyperopt_trials_final do? : num inner hyperopt trials final config.toml: Number of trials for Optuna hyperparameter optimization for final models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. Applies to final model only even if num_inner_hyperopt_trials=0. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. However, for final model each fold is independently optimized and can overfit on each fold, after which predictions are averaged (so no issue with averaging hyperparameters when doing CV with tuning or evolution). " + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_inner_hyperopt_trials_final. 
: num inner hyperopt trials final config.toml: Number of trials for Optuna hyperparameter optimization for final models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. Applies to final model only even if num_inner_hyperopt_trials=0. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. However, for final model each fold is independently optimized and can overfit on each fold, after which predictions are averaged (so no issue with averaging hyperparameters when doing CV with tuning or evolution). " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of trials for hyperparameter optimization for final model only: . : Set the num inner hyperopt trials final config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_inner_hyperopt_trials_final", + "output": "num inner hyperopt trials final config.toml: Number of trials for Optuna hyperparameter optimization for final models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. Applies to final model only even if num_inner_hyperopt_trials=0. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. However, for final model each fold is independently optimized and can overfit on each fold, after which predictions are averaged (so no issue with averaging hyperparameters when doing CV with tuning or evolution). 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_inner_hyperopt_trials_final", + "output": "num inner hyperopt trials final config.toml: Number of trials for hyperparameter optimization for final model only: Number of trials for Optuna hyperparameter optimization for final models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. Applies to final model only even if num_inner_hyperopt_trials=0. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. However, for final model each fold is independently optimized and can overfit on each fold, after which predictions are averaged (so no issue with averaging hyperparameters when doing CV with tuning or evolution). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num inner hyperopt trials final", + "output": "num inner hyperopt trials final config.toml: Number of trials for hyperparameter optimization for final model only: Number of trials for Optuna hyperparameter optimization for final models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. Applies to final model only even if num_inner_hyperopt_trials=0. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. 
However, for final model each fold is independently optimized and can overfit on each fold, after which predictions are averaged (so no issue with averaging hyperparameters when doing CV with tuning or evolution). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of trials for hyperparameter optimization for final model only: ", + "output": "num inner hyperopt trials final config.toml: Number of trials for hyperparameter optimization for final model only: Number of trials for Optuna hyperparameter optimization for final models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. Applies to final model only even if num_inner_hyperopt_trials=0. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. However, for final model each fold is independently optimized and can overfit on each fold, after which predictions are averaged (so no issue with averaging hyperparameters when doing CV with tuning or evolution). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_inner_hyperopt_trials_final", + "output": "num inner hyperopt trials final config.toml: Number of trials for Optuna hyperparameter optimization for final models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. Applies to final model only even if num_inner_hyperopt_trials=0. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. 
However, for final model each fold is independently optimized and can overfit on each fold, after which predictions are averaged (so no issue with averaging hyperparameters when doing CV with tuning or evolution). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_inner_hyperopt_trials_final", + "output": "num inner hyperopt trials final config.toml: Number of trials for hyperparameter optimization for final model only: Number of trials for Optuna hyperparameter optimization for final models. 0 means no trials. For small data, 100 is ok choice, while for larger data smaller values are reasonable if need results quickly. Applies to final model only even if num_inner_hyperopt_trials=0. If using RAPIDS or DASK, hyperparameter optimization keeps data on GPU entire time. Currently applies to XGBoost GBM/Dart and LightGBM. Useful when there is high overhead of DAI outside inner model fit/predict, so this tunes without that overhead. However, for final model each fold is independently optimized and can overfit on each fold, after which predictions are averaged (so no issue with averaging hyperparameters when doing CV with tuning or evolution). " + }, + { + "prompt_type": "plain", + "instruction": ": What does num_hyperopt_individuals_final do? : num hyperopt individuals final config.toml: Number of individuals in final model (all folds/repeats for given base model) to optimize with Optuna hyperparameter tuning. -1 means all. 0 is same as choosing no Optuna trials. Might be only beneficial to optimize hyperparameters of best individual (i.e. value of 1) in ensemble. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_hyperopt_individuals_final. : num hyperopt individuals final config.toml: Number of individuals in final model (all folds/repeats for given base model) to optimize with Optuna hyperparameter tuning. -1 means all. 0 is same as choosing no Optuna trials. 
Might be only beneficial to optimize hyperparameters of best individual (i.e. value of 1) in ensemble. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of individuals in final ensemble to use Optuna on: . : Set the num hyperopt individuals final config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_hyperopt_individuals_final", + "output": "num hyperopt individuals final config.toml: Number of individuals in final model (all folds/repeats for given base model) to optimize with Optuna hyperparameter tuning. -1 means all. 0 is same as choosing no Optuna trials. Might be only beneficial to optimize hyperparameters of best individual (i.e. value of 1) in ensemble. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_hyperopt_individuals_final", + "output": "num hyperopt individuals final config.toml: Number of individuals in final ensemble to use Optuna on: Number of individuals in final model (all folds/repeats for given base model) to optimize with Optuna hyperparameter tuning. -1 means all. 0 is same as choosing no Optuna trials. Might be only beneficial to optimize hyperparameters of best individual (i.e. value of 1) in ensemble. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num hyperopt individuals final", + "output": "num hyperopt individuals final config.toml: Number of individuals in final ensemble to use Optuna on: Number of individuals in final model (all folds/repeats for given base model) to optimize with Optuna hyperparameter tuning. -1 means all. 0 is same as choosing no Optuna trials. Might be only beneficial to optimize hyperparameters of best individual (i.e. value of 1) in ensemble. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of individuals in final ensemble to use Optuna on: ", + "output": "num hyperopt individuals final config.toml: Number of individuals in final ensemble to use Optuna on: Number of individuals in final model (all folds/repeats for given base model) to optimize with Optuna hyperparameter tuning. -1 means all. 0 is same as choosing no Optuna trials. Might be only beneficial to optimize hyperparameters of best individual (i.e. value of 1) in ensemble. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_hyperopt_individuals_final", + "output": "num hyperopt individuals final config.toml: Number of individuals in final model (all folds/repeats for given base model) to optimize with Optuna hyperparameter tuning. -1 means all. 0 is same as choosing no Optuna trials. Might be only beneficial to optimize hyperparameters of best individual (i.e. value of 1) in ensemble. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_hyperopt_individuals_final", + "output": "num hyperopt individuals final config.toml: Number of individuals in final ensemble to use Optuna on: Number of individuals in final model (all folds/repeats for given base model) to optimize with Optuna hyperparameter tuning. -1 means all. 0 is same as choosing no Optuna trials. Might be only beneficial to optimize hyperparameters of best individual (i.e. value of 1) in ensemble. " + }, + { + "prompt_type": "plain", + "instruction": ": What does optuna_pruner do? : optuna pruner config.toml: Optuna Pruner to use (applicable to XGBoost and LightGBM that support Optuna callbacks). To disable choose None." + }, + { + "prompt_type": "plain", + "instruction": ": Explain optuna_pruner. 
: optuna pruner config.toml: Optuna Pruner to use (applicable to XGBoost and LightGBM that support Optuna callbacks). To disable choose None." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Optuna Pruners: . : Set the optuna pruner config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "optuna_pruner", + "output": "optuna pruner config.toml: Optuna Pruner to use (applicable to XGBoost and LightGBM that support Optuna callbacks). To disable choose None." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "optuna_pruner", + "output": "optuna pruner config.toml: Optuna Pruners: Optuna Pruner to use (applicable to XGBoost and LightGBM that support Optuna callbacks). To disable choose None." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "optuna pruner", + "output": "optuna pruner config.toml: Optuna Pruners: Optuna Pruner to use (applicable to XGBoost and LightGBM that support Optuna callbacks). To disable choose None." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Optuna Pruners: ", + "output": "optuna pruner config.toml: Optuna Pruners: Optuna Pruner to use (applicable to XGBoost and LightGBM that support Optuna callbacks). To disable choose None." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting optuna_pruner", + "output": "optuna pruner config.toml: Optuna Pruner to use (applicable to XGBoost and LightGBM that support Optuna callbacks). To disable choose None." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting optuna_pruner", + "output": "optuna pruner config.toml: Optuna Pruners: Optuna Pruner to use (applicable to XGBoost and LightGBM that support Optuna callbacks). To disable choose None." + }, + { + "prompt_type": "plain", + "instruction": ": What does optuna_pruner_kwargs do? : optuna pruner kwargs config.toml: Set Optuna constructor arguments for particular applicable pruners. https://optuna.readthedocs.io/en/stable/reference/pruners.html " + }, + { + "prompt_type": "plain", + "instruction": ": Explain optuna_pruner_kwargs. : optuna pruner kwargs config.toml: Set Optuna constructor arguments for particular applicable pruners. https://optuna.readthedocs.io/en/stable/reference/pruners.html " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Set Optuna pruner constructor args.: . : Set the optuna pruner kwargs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "optuna_pruner_kwargs", + "output": "optuna pruner kwargs config.toml: Set Optuna constructor arguments for particular applicable pruners. https://optuna.readthedocs.io/en/stable/reference/pruners.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "optuna_pruner_kwargs", + "output": "optuna pruner kwargs config.toml: Set Optuna pruner constructor args.: Set Optuna constructor arguments for particular applicable pruners. https://optuna.readthedocs.io/en/stable/reference/pruners.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "optuna pruner kwargs", + "output": "optuna pruner kwargs config.toml: Set Optuna pruner constructor args.: Set Optuna constructor arguments for particular applicable pruners. 
https://optuna.readthedocs.io/en/stable/reference/pruners.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Set Optuna pruner constructor args.: ", + "output": "optuna pruner kwargs config.toml: Set Optuna pruner constructor args.: Set Optuna constructor arguments for particular applicable pruners. https://optuna.readthedocs.io/en/stable/reference/pruners.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting optuna_pruner_kwargs", + "output": "optuna pruner kwargs config.toml: Set Optuna constructor arguments for particular applicable pruners. https://optuna.readthedocs.io/en/stable/reference/pruners.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting optuna_pruner_kwargs", + "output": "optuna pruner kwargs config.toml: Set Optuna pruner constructor args.: Set Optuna constructor arguments for particular applicable pruners. https://optuna.readthedocs.io/en/stable/reference/pruners.html " + }, + { + "prompt_type": "plain", + "instruction": ": What does optuna_sampler do? : optuna sampler config.toml: Optuna Sampler to use (applicable to XGBoost and LightGBM that support Optuna callbacks)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain optuna_sampler. : optuna sampler config.toml: Optuna Sampler to use (applicable to XGBoost and LightGBM that support Optuna callbacks)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Optuna Samplers: . : Set the optuna sampler config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "optuna_sampler", + "output": "optuna sampler config.toml: Optuna Sampler to use (applicable to XGBoost and LightGBM that support Optuna callbacks)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "optuna_sampler", + "output": "optuna sampler config.toml: Optuna Samplers: Optuna Sampler to use (applicable to XGBoost and LightGBM that support Optuna callbacks)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "optuna sampler", + "output": "optuna sampler config.toml: Optuna Samplers: Optuna Sampler to use (applicable to XGBoost and LightGBM that support Optuna callbacks)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Optuna Samplers: ", + "output": "optuna sampler config.toml: Optuna Samplers: Optuna Sampler to use (applicable to XGBoost and LightGBM that support Optuna callbacks)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting optuna_sampler", + "output": "optuna sampler config.toml: Optuna Sampler to use (applicable to XGBoost and LightGBM that support Optuna callbacks)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting optuna_sampler", + "output": "optuna sampler config.toml: Optuna Samplers: Optuna Sampler to use (applicable to XGBoost and LightGBM that support Optuna callbacks)." + }, + { + "prompt_type": "plain", + "instruction": ": What does optuna_sampler_kwargs do? : optuna sampler kwargs config.toml: Set Optuna constructor arguments for particular applicable samplers. https://optuna.readthedocs.io/en/stable/reference/samplers.html " + }, + { + "prompt_type": "plain", + "instruction": ": Explain optuna_sampler_kwargs. : optuna sampler kwargs config.toml: Set Optuna constructor arguments for particular applicable samplers. 
https://optuna.readthedocs.io/en/stable/reference/samplers.html " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Set Optuna sampler constructor args.: . : Set the optuna sampler kwargs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "optuna_sampler_kwargs", + "output": "optuna sampler kwargs config.toml: Set Optuna constructor arguments for particular applicable samplers. https://optuna.readthedocs.io/en/stable/reference/samplers.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "optuna_sampler_kwargs", + "output": "optuna sampler kwargs config.toml: Set Optuna sampler constructor args.: Set Optuna constructor arguments for particular applicable samplers. https://optuna.readthedocs.io/en/stable/reference/samplers.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "optuna sampler kwargs", + "output": "optuna sampler kwargs config.toml: Set Optuna sampler constructor args.: Set Optuna constructor arguments for particular applicable samplers. https://optuna.readthedocs.io/en/stable/reference/samplers.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Set Optuna sampler constructor args.: ", + "output": "optuna sampler kwargs config.toml: Set Optuna sampler constructor args.: Set Optuna constructor arguments for particular applicable samplers. https://optuna.readthedocs.io/en/stable/reference/samplers.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting optuna_sampler_kwargs", + "output": "optuna sampler kwargs config.toml: Set Optuna constructor arguments for particular applicable samplers. 
https://optuna.readthedocs.io/en/stable/reference/samplers.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting optuna_sampler_kwargs", + "output": "optuna sampler kwargs config.toml: Set Optuna sampler constructor args.: Set Optuna constructor arguments for particular applicable samplers. https://optuna.readthedocs.io/en/stable/reference/samplers.html " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_xgboost_hyperopt_callback do? : enable xgboost hyperopt callback config.toml: Whether to enable Optuna's XGBoost Pruning callback to abort unpromising runs. Not done if tuning learning rate." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_xgboost_hyperopt_callback. : enable xgboost hyperopt callback config.toml: Whether to enable Optuna's XGBoost Pruning callback to abort unpromising runs. Not done if tuning learning rate." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable Optuna XGBoost Pruning callback: . : Set the enable xgboost hyperopt callback config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_hyperopt_callback", + "output": "enable xgboost hyperopt callback config.toml: Whether to enable Optuna's XGBoost Pruning callback to abort unpromising runs. Not done if tuning learning rate." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_hyperopt_callback", + "output": "enable xgboost hyperopt callback config.toml: Enable Optuna XGBoost Pruning callback: Whether to enable Optuna's XGBoost Pruning callback to abort unpromising runs. Not done if tuning learning rate." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable xgboost hyperopt callback", + "output": "enable xgboost hyperopt callback config.toml: Enable Optuna XGBoost Pruning callback: Whether to enable Optuna's XGBoost Pruning callback to abort unpromising runs. Not done if tuning learning rate." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable Optuna XGBoost Pruning callback: ", + "output": "enable xgboost hyperopt callback config.toml: Enable Optuna XGBoost Pruning callback: Whether to enable Optuna's XGBoost Pruning callback to abort unpromising runs. Not done if tuning learning rate." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_xgboost_hyperopt_callback", + "output": "enable xgboost hyperopt callback config.toml: Whether to enable Optuna's XGBoost Pruning callback to abort unpromising runs. Not done if tuning learning rate." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_xgboost_hyperopt_callback", + "output": "enable xgboost hyperopt callback config.toml: Enable Optuna XGBoost Pruning callback: Whether to enable Optuna's XGBoost Pruning callback to abort unpromising runs. Not done if tuning learning rate." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_lightgbm_hyperopt_callback do? : enable lightgbm hyperopt callback config.toml: Whether to enable Optuna's LightGBM Pruning callback to abort unpromising runs. Not done if tuning learning rate." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_lightgbm_hyperopt_callback. : enable lightgbm hyperopt callback config.toml: Whether to enable Optuna's LightGBM Pruning callback to abort unpromising runs. Not done if tuning learning rate." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable Optuna LightGBM Pruning callback: . : Set the enable lightgbm hyperopt callback config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_hyperopt_callback", + "output": "enable lightgbm hyperopt callback config.toml: Whether to enable Optuna's LightGBM Pruning callback to abort unpromising runs. Not done if tuning learning rate." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_hyperopt_callback", + "output": "enable lightgbm hyperopt callback config.toml: Enable Optuna LightGBM Pruning callback: Whether to enable Optuna's LightGBM Pruning callback to abort unpromising runs. Not done if tuning learning rate." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable lightgbm hyperopt callback", + "output": "enable lightgbm hyperopt callback config.toml: Enable Optuna LightGBM Pruning callback: Whether to enable Optuna's LightGBM Pruning callback to abort unpromising runs. Not done if tuning learning rate." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable Optuna LightGBM Pruning callback: ", + "output": "enable lightgbm hyperopt callback config.toml: Enable Optuna LightGBM Pruning callback: Whether to enable Optuna's LightGBM Pruning callback to abort unpromising runs. Not done if tuning learning rate." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_lightgbm_hyperopt_callback", + "output": "enable lightgbm hyperopt callback config.toml: Whether to enable Optuna's LightGBM Pruning callback to abort unpromising runs. Not done if tuning learning rate." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_lightgbm_hyperopt_callback", + "output": "enable lightgbm hyperopt callback config.toml: Enable Optuna LightGBM Pruning callback: Whether to enable Optuna's LightGBM Pruning callback to abort unpromising runs. Not done if tuning learning rate." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_xgboost_dart do? : enable xgboost dart config.toml: Whether to enable XGBoost Dart models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_xgboost_dart. : enable xgboost dart config.toml: Whether to enable XGBoost Dart models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: XGBoost Dart models: . : Set the enable xgboost dart config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_dart", + "output": "enable xgboost dart config.toml: Whether to enable XGBoost Dart models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_dart", + "output": "enable xgboost dart config.toml: XGBoost Dart models: Whether to enable XGBoost Dart models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable xgboost dart", + "output": "enable xgboost dart config.toml: XGBoost Dart models: Whether to enable XGBoost Dart models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "XGBoost Dart models: ", + "output": "enable xgboost dart config.toml: XGBoost Dart models: Whether to enable XGBoost Dart models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": 
"Provide a short explanation of the expert setting enable_xgboost_dart", + "output": "enable xgboost dart config.toml: Whether to enable XGBoost Dart models ('auto'/'on'/'off')" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_xgboost_dart", + "output": "enable xgboost dart config.toml: XGBoost Dart models: Whether to enable XGBoost Dart models ('auto'/'on'/'off')" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_xgboost_dart_dask do? : enable xgboost dart dask config.toml: Whether to enable dask_cudf (multi-GPU) version of XGBoost Dart. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_xgboost_dart_dask. : enable xgboost dart dask config.toml: Whether to enable dask_cudf (multi-GPU) version of XGBoost Dart. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable dask_cudf (multi-GPU) XGBoost Dart: . : Set the enable xgboost dart dask config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_dart_dask", + "output": "enable xgboost dart dask config.toml: Whether to enable dask_cudf (multi-GPU) version of XGBoost Dart. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_dart_dask", + "output": "enable xgboost dart dask config.toml: Enable dask_cudf (multi-GPU) XGBoost Dart: Whether to enable dask_cudf (multi-GPU) version of XGBoost Dart. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable xgboost dart dask", + "output": "enable xgboost dart dask config.toml: Enable dask_cudf (multi-GPU) XGBoost Dart: Whether to enable dask_cudf (multi-GPU) version of XGBoost Dart. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable dask_cudf (multi-GPU) XGBoost Dart: ", + "output": "enable xgboost dart dask config.toml: Enable dask_cudf (multi-GPU) XGBoost Dart: Whether to enable dask_cudf (multi-GPU) version of XGBoost Dart. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_xgboost_dart_dask", + "output": "enable xgboost dart dask config.toml: Whether to enable dask_cudf (multi-GPU) version of XGBoost Dart. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_xgboost_dart_dask", + "output": "enable xgboost dart dask config.toml: Enable dask_cudf (multi-GPU) XGBoost Dart: Whether to enable dask_cudf (multi-GPU) version of XGBoost Dart. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_xgboost_rf_dask do? : enable xgboost rf dask config.toml: Whether to enable dask_cudf (multi-GPU) version of XGBoost RF. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_xgboost_rf_dask. : enable xgboost rf dask config.toml: Whether to enable dask_cudf (multi-GPU) version of XGBoost RF. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable dask_cudf (multi-GPU) XGBoost RF: . : Set the enable xgboost rf dask config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_rf_dask", + "output": "enable xgboost rf dask config.toml: Whether to enable dask_cudf (multi-GPU) version of XGBoost RF. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xgboost_rf_dask", + "output": "enable xgboost rf dask config.toml: Enable dask_cudf (multi-GPU) XGBoost RF: Whether to enable dask_cudf (multi-GPU) version of XGBoost RF. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable xgboost rf dask", + "output": "enable xgboost rf dask config.toml: Enable dask_cudf (multi-GPU) XGBoost RF: Whether to enable dask_cudf (multi-GPU) version of XGBoost RF. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable dask_cudf (multi-GPU) XGBoost RF: ", + "output": "enable xgboost rf dask config.toml: Enable dask_cudf (multi-GPU) XGBoost RF: Whether to enable dask_cudf (multi-GPU) version of XGBoost RF. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_xgboost_rf_dask", + "output": "enable xgboost rf dask config.toml: Whether to enable dask_cudf (multi-GPU) version of XGBoost RF. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_xgboost_rf_dask", + "output": "enable xgboost rf dask config.toml: Enable dask_cudf (multi-GPU) XGBoost RF: Whether to enable dask_cudf (multi-GPU) version of XGBoost RF. Disabled unless switched on. If have only 1 GPU, then only uses dask_cudf if use_dask_for_1_gpu is True Only applicable for single final model without early stopping. No Shapley possible. " + }, + { + "prompt_type": "plain", + "instruction": ": What does num_gpus_per_hyperopt_dask do? : num gpus per hyperopt dask config.toml: Number of GPUs to use per model hyperopt training task. Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model across a Dask cluster.Ignored if GPUs disabled or no GPUs on system.In multinode context, this refers to the per-node value. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_gpus_per_hyperopt_dask. : num gpus per hyperopt dask config.toml: Number of GPUs to use per model hyperopt training task. Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model across a Dask cluster.Ignored if GPUs disabled or no GPUs on system.In multinode context, this refers to the per-node value. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: #GPUs/HyperOptDask (-1 = all): . : Set the num gpus per hyperopt dask config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_gpus_per_hyperopt_dask", + "output": "num gpus per hyperopt dask config.toml: Number of GPUs to use per model hyperopt training task. 
Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model across a Dask cluster.Ignored if GPUs disabled or no GPUs on system.In multinode context, this refers to the per-node value. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_gpus_per_hyperopt_dask", + "output": "num gpus per hyperopt dask config.toml: #GPUs/HyperOptDask (-1 = all): Number of GPUs to use per model hyperopt training task. Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model across a Dask cluster.Ignored if GPUs disabled or no GPUs on system.In multinode context, this refers to the per-node value. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num gpus per hyperopt dask", + "output": "num gpus per hyperopt dask config.toml: #GPUs/HyperOptDask (-1 = all): Number of GPUs to use per model hyperopt training task. Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model across a Dask cluster.Ignored if GPUs disabled or no GPUs on system.In multinode context, this refers to the per-node value. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "#GPUs/HyperOptDask (-1 = all): ", + "output": "num gpus per hyperopt dask config.toml: #GPUs/HyperOptDask (-1 = all): Number of GPUs to use per model hyperopt training task. Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model across a Dask cluster.Ignored if GPUs disabled or no GPUs on system.In multinode context, this refers to the per-node value. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_gpus_per_hyperopt_dask", + "output": "num gpus per hyperopt dask config.toml: Number of GPUs to use per model hyperopt training task. Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model across a Dask cluster.Ignored if GPUs disabled or no GPUs on system.In multinode context, this refers to the per-node value. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_gpus_per_hyperopt_dask", + "output": "num gpus per hyperopt dask config.toml: #GPUs/HyperOptDask (-1 = all): Number of GPUs to use per model hyperopt training task. Set to -1 for all GPUs.For example, when this is set to -1 and there are 4 GPUs available, all of them can be used for the training of a single model across a Dask cluster.Ignored if GPUs disabled or no GPUs on system.In multinode context, this refers to the per-node value. " + }, + { + "prompt_type": "plain", + "instruction": ": What does use_xgboost_xgbfi do? : use xgboost xgbfi config.toml: Whether to use (and expect exists) xgbfi feature interactions for xgboost." + }, + { + "prompt_type": "plain", + "instruction": ": Explain use_xgboost_xgbfi. : use xgboost xgbfi config.toml: Whether to use (and expect exists) xgbfi feature interactions for xgboost." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_xgboost_xgbfi", + "output": "use xgboost xgbfi config.toml: Whether to use (and expect exists) xgbfi feature interactions for xgboost." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_xgboost_xgbfi", + "output": "use xgboost xgbfi config.toml: Whether to use (and expect exists) xgbfi feature interactions for xgboost." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use xgboost xgbfi", + "output": "use xgboost xgbfi config.toml: Whether to use (and expect exists) xgbfi feature interactions for xgboost." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "use xgboost xgbfi config.toml: Whether to use (and expect exists) xgbfi feature interactions for xgboost." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting use_xgboost_xgbfi", + "output": "use xgboost xgbfi config.toml: Whether to use (and expect exists) xgbfi feature interactions for xgboost." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting use_xgboost_xgbfi", + "output": "use xgboost xgbfi config.toml: Whether to use (and expect exists) xgbfi feature interactions for xgboost." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_lightgbm_boosting_types do? : enable lightgbm boosting types config.toml: Which boosting types to enable for LightGBM (gbdt = boosted trees, rf_early_stopping = random forest with early stopping rf = random forest (no early stopping), dart = drop-out boosted trees with no early stopping" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_lightgbm_boosting_types. : enable lightgbm boosting types config.toml: Which boosting types to enable for LightGBM (gbdt = boosted trees, rf_early_stopping = random forest with early stopping rf = random forest (no early stopping), dart = drop-out boosted trees with no early stopping" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: LightGBM Boosting types: . 
: Set the enable lightgbm boosting types config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_boosting_types", + "output": "enable lightgbm boosting types config.toml: Which boosting types to enable for LightGBM (gbdt = boosted trees, rf_early_stopping = random forest with early stopping rf = random forest (no early stopping), dart = drop-out boosted trees with no early stopping" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_boosting_types", + "output": "enable lightgbm boosting types config.toml: LightGBM Boosting types: Which boosting types to enable for LightGBM (gbdt = boosted trees, rf_early_stopping = random forest with early stopping rf = random forest (no early stopping), dart = drop-out boosted trees with no early stopping" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable lightgbm boosting types", + "output": "enable lightgbm boosting types config.toml: LightGBM Boosting types: Which boosting types to enable for LightGBM (gbdt = boosted trees, rf_early_stopping = random forest with early stopping rf = random forest (no early stopping), dart = drop-out boosted trees with no early stopping" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "LightGBM Boosting types: ", + "output": "enable lightgbm boosting types config.toml: LightGBM Boosting types: Which boosting types to enable for LightGBM (gbdt = boosted trees, rf_early_stopping = random forest with early stopping rf = random forest (no early stopping), dart = drop-out boosted trees with no early stopping" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_lightgbm_boosting_types", + "output": 
"enable lightgbm boosting types config.toml: Which boosting types to enable for LightGBM (gbdt = boosted trees, rf_early_stopping = random forest with early stopping rf = random forest (no early stopping), dart = drop-out boosted trees with no early stopping" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_lightgbm_boosting_types", + "output": "enable lightgbm boosting types config.toml: LightGBM Boosting types: Which boosting types to enable for LightGBM (gbdt = boosted trees, rf_early_stopping = random forest with early stopping rf = random forest (no early stopping), dart = drop-out boosted trees with no early stopping" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_lightgbm_multiclass_balancing do? : enable lightgbm multiclass balancing config.toml: Whether to enable automatic class weighting for imbalanced multiclass problems. Can make worse probabilities, but improve confusion-matrix based scorers for rare classes without the need to manually calibrate probabilities or fine-tune the label creation process." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_lightgbm_multiclass_balancing. : enable lightgbm multiclass balancing config.toml: Whether to enable automatic class weighting for imbalanced multiclass problems. Can make worse probabilities, but improve confusion-matrix based scorers for rare classes without the need to manually calibrate probabilities or fine-tune the label creation process." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: LightGBM multiclass balancing: . 
: Set the enable lightgbm multiclass balancing config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_multiclass_balancing", + "output": "enable lightgbm multiclass balancing config.toml: Whether to enable automatic class weighting for imbalanced multiclass problems. Can make worse probabilities, but improve confusion-matrix based scorers for rare classes without the need to manually calibrate probabilities or fine-tune the label creation process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_multiclass_balancing", + "output": "enable lightgbm multiclass balancing config.toml: LightGBM multiclass balancing: Whether to enable automatic class weighting for imbalanced multiclass problems. Can make worse probabilities, but improve confusion-matrix based scorers for rare classes without the need to manually calibrate probabilities or fine-tune the label creation process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable lightgbm multiclass balancing", + "output": "enable lightgbm multiclass balancing config.toml: LightGBM multiclass balancing: Whether to enable automatic class weighting for imbalanced multiclass problems. Can make worse probabilities, but improve confusion-matrix based scorers for rare classes without the need to manually calibrate probabilities or fine-tune the label creation process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "LightGBM multiclass balancing: ", + "output": "enable lightgbm multiclass balancing config.toml: LightGBM multiclass balancing: Whether to enable automatic class weighting for imbalanced multiclass problems. 
Can make worse probabilities, but improve confusion-matrix based scorers for rare classes without the need to manually calibrate probabilities or fine-tune the label creation process." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_lightgbm_multiclass_balancing", + "output": "enable lightgbm multiclass balancing config.toml: Whether to enable automatic class weighting for imbalanced multiclass problems. Can make worse probabilities, but improve confusion-matrix based scorers for rare classes without the need to manually calibrate probabilities or fine-tune the label creation process." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_lightgbm_multiclass_balancing", + "output": "enable lightgbm multiclass balancing config.toml: LightGBM multiclass balancing: Whether to enable automatic class weighting for imbalanced multiclass problems. Can make worse probabilities, but improve confusion-matrix based scorers for rare classes without the need to manually calibrate probabilities or fine-tune the label creation process." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_lightgbm_cat_support do? : enable lightgbm cat support config.toml: Whether to enable LightGBM categorical feature support (runs in CPU mode even if GPUs enabled, and no MOJO built)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_lightgbm_cat_support. : enable lightgbm cat support config.toml: Whether to enable LightGBM categorical feature support (runs in CPU mode even if GPUs enabled, and no MOJO built)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: LightGBM categorical support: . 
: Set the enable lightgbm cat support config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_cat_support", + "output": "enable lightgbm cat support config.toml: Whether to enable LightGBM categorical feature support (runs in CPU mode even if GPUs enabled, and no MOJO built)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_cat_support", + "output": "enable lightgbm cat support config.toml: LightGBM categorical support: Whether to enable LightGBM categorical feature support (runs in CPU mode even if GPUs enabled, and no MOJO built)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable lightgbm cat support", + "output": "enable lightgbm cat support config.toml: LightGBM categorical support: Whether to enable LightGBM categorical feature support (runs in CPU mode even if GPUs enabled, and no MOJO built)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "LightGBM categorical support: ", + "output": "enable lightgbm cat support config.toml: LightGBM categorical support: Whether to enable LightGBM categorical feature support (runs in CPU mode even if GPUs enabled, and no MOJO built)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_lightgbm_cat_support", + "output": "enable lightgbm cat support config.toml: Whether to enable LightGBM categorical feature support (runs in CPU mode even if GPUs enabled, and no MOJO built)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_lightgbm_cat_support", + "output": "enable lightgbm cat support config.toml: LightGBM categorical support: Whether to enable 
LightGBM categorical feature support (runs in CPU mode even if GPUs enabled, and no MOJO built)" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_lightgbm_linear_tree do? : enable lightgbm linear tree config.toml: Whether to enable LightGBM linear_tree handling (only CPU mode currently, no L1 regularization -- mae objective, and no MOJO build). " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_lightgbm_linear_tree. : enable lightgbm linear tree config.toml: Whether to enable LightGBM linear_tree handling (only CPU mode currently, no L1 regularization -- mae objective, and no MOJO build). " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: LightGBM linear_tree mode: . : Set the enable lightgbm linear tree config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_linear_tree", + "output": "enable lightgbm linear tree config.toml: Whether to enable LightGBM linear_tree handling (only CPU mode currently, no L1 regularization -- mae objective, and no MOJO build). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_linear_tree", + "output": "enable lightgbm linear tree config.toml: LightGBM linear_tree mode: Whether to enable LightGBM linear_tree handling (only CPU mode currently, no L1 regularization -- mae objective, and no MOJO build). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable lightgbm linear tree", + "output": "enable lightgbm linear tree config.toml: LightGBM linear_tree mode: Whether to enable LightGBM linear_tree handling (only CPU mode currently, no L1 regularization -- mae objective, and no MOJO build). 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "LightGBM linear_tree mode: ", + "output": "enable lightgbm linear tree config.toml: LightGBM linear_tree mode: Whether to enable LightGBM linear_tree handling (only CPU mode currently, no L1 regularization -- mae objective, and no MOJO build). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_lightgbm_linear_tree", + "output": "enable lightgbm linear tree config.toml: Whether to enable LightGBM linear_tree handling (only CPU mode currently, no L1 regularization -- mae objective, and no MOJO build). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_lightgbm_linear_tree", + "output": "enable lightgbm linear tree config.toml: LightGBM linear_tree mode: Whether to enable LightGBM linear_tree handling (only CPU mode currently, no L1 regularization -- mae objective, and no MOJO build). " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_lightgbm_extra_trees do? : enable lightgbm extra trees config.toml: Whether to enable LightGBM extra trees mode to help avoid overfitting" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_lightgbm_extra_trees. : enable lightgbm extra trees config.toml: Whether to enable LightGBM extra trees mode to help avoid overfitting" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: LightGBM extra trees mode: . 
: Set the enable lightgbm extra trees config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_extra_trees", + "output": "enable lightgbm extra trees config.toml: Whether to enable LightGBM extra trees mode to help avoid overfitting" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_extra_trees", + "output": "enable lightgbm extra trees config.toml: LightGBM extra trees mode: Whether to enable LightGBM extra trees mode to help avoid overfitting" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable lightgbm extra trees", + "output": "enable lightgbm extra trees config.toml: LightGBM extra trees mode: Whether to enable LightGBM extra trees mode to help avoid overfitting" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "LightGBM extra trees mode: ", + "output": "enable lightgbm extra trees config.toml: LightGBM extra trees mode: Whether to enable LightGBM extra trees mode to help avoid overfitting" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_lightgbm_extra_trees", + "output": "enable lightgbm extra trees config.toml: Whether to enable LightGBM extra trees mode to help avoid overfitting" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_lightgbm_extra_trees", + "output": "enable lightgbm extra trees config.toml: LightGBM extra trees mode: Whether to enable LightGBM extra trees mode to help avoid overfitting" + }, + { + "prompt_type": "plain", + "instruction": ": What does lightgbm_monotone_constraints_method do? 
: lightgbm monotone constraints method config.toml: basic: as fast as when no constraints applied, but over-constrains the predictions. intermediate: very slightly slower, but much less constraining while still holding monotonicity and should be more accurate than basic. advanced: slower, but even more accurate than intermediate. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain lightgbm_monotone_constraints_method. : lightgbm monotone constraints method config.toml: basic: as fast as when no constraints applied, but over-constrains the predictions. intermediate: very slightly slower, but much less constraining while still holding monotonicity and should be more accurate than basic. advanced: slower, but even more accurate than intermediate. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Method to use for monotonicity constraints for LightGBM: . : Set the lightgbm monotone constraints method config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_monotone_constraints_method", + "output": "lightgbm monotone constraints method config.toml: basic: as fast as when no constraints applied, but over-constrains the predictions. intermediate: very slightly slower, but much less constraining while still holding monotonicity and should be more accurate than basic. advanced: slower, but even more accurate than intermediate. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_monotone_constraints_method", + "output": "lightgbm monotone constraints method config.toml: Method to use for monotonicity constraints for LightGBM: basic: as fast as when no constraints applied, but over-constrains the predictions. intermediate: very slightly slower, but much less constraining while still holding monotonicity and should be more accurate than basic. advanced: slower, but even more accurate than intermediate. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm monotone constraints method", + "output": "lightgbm monotone constraints method config.toml: Method to use for monotonicity constraints for LightGBM: basic: as fast as when no constraints applied, but over-constrains the predictions. intermediate: very slightly slower, but much less constraining while still holding monotonicity and should be more accurate than basic. advanced: slower, but even more accurate than intermediate. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Method to use for monotonicity constraints for LightGBM: ", + "output": "lightgbm monotone constraints method config.toml: Method to use for monotonicity constraints for LightGBM: basic: as fast as when no constraints applied, but over-constrains the predictions. intermediate: very slightly slower, but much less constraining while still holding monotonicity and should be more accurate than basic. advanced: slower, but even more accurate than intermediate. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting lightgbm_monotone_constraints_method", + "output": "lightgbm monotone constraints method config.toml: basic: as fast as when no constraints applied, but over-constrains the predictions.intermediate: very slightly slower, but much less constraining while still holding monotonicity and should be more accurate than basic.advanced: slower, but even more accurate than intermediate. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting lightgbm_monotone_constraints_method", + "output": "lightgbm monotone constraints method config.toml: Method to use for monotonicity constraints for LightGBM: basic: as fast as when no constraints applied, but over-constrains the predictions.intermediate: very slightly slower, but much less constraining while still holding monotonicity and should be more accurate than basic.advanced: slower, but even more accurate than intermediate. " + }, + { + "prompt_type": "plain", + "instruction": ": What does lightgbm_monotone_penalty do? : lightgbm monotone penalty config.toml: Forbids any monotone splits on the first x (rounded down) level(s) of the tree.The penalty applied to monotone splits on a given depth is a continuous,increasing function the penalization parameter.https://lightgbm.readthedocs.io/en/latest/Parameters.html#monotone_penalty " + }, + { + "prompt_type": "plain", + "instruction": ": Explain lightgbm_monotone_penalty. : lightgbm monotone penalty config.toml: Forbids any monotone splits on the first x (rounded down) level(s) of the tree.The penalty applied to monotone splits on a given depth is a continuous,increasing function the penalization parameter.https://lightgbm.readthedocs.io/en/latest/Parameters.html#monotone_penalty " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: LightGBM Monotone Penalty: . 
: Set the lightgbm monotone penalty config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_monotone_penalty", + "output": "lightgbm monotone penalty config.toml: Forbids any monotone splits on the first x (rounded down) level(s) of the tree.The penalty applied to monotone splits on a given depth is a continuous,increasing function of the penalization parameter.https://lightgbm.readthedocs.io/en/latest/Parameters.html#monotone_penalty " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_monotone_penalty", + "output": "lightgbm monotone penalty config.toml: LightGBM Monotone Penalty: Forbids any monotone splits on the first x (rounded down) level(s) of the tree.The penalty applied to monotone splits on a given depth is a continuous,increasing function of the penalization parameter.https://lightgbm.readthedocs.io/en/latest/Parameters.html#monotone_penalty " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm monotone penalty", + "output": "lightgbm monotone penalty config.toml: LightGBM Monotone Penalty: Forbids any monotone splits on the first x (rounded down) level(s) of the tree.The penalty applied to monotone splits on a given depth is a continuous,increasing function of the penalization parameter.https://lightgbm.readthedocs.io/en/latest/Parameters.html#monotone_penalty " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "LightGBM Monotone Penalty: ", + "output": "lightgbm monotone penalty config.toml: LightGBM Monotone Penalty: Forbids any monotone splits on the first x (rounded down) level(s) of the tree.The penalty applied to monotone splits on a given depth is a continuous,increasing function of the penalization 
parameter.https://lightgbm.readthedocs.io/en/latest/Parameters.html#monotone_penalty " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting lightgbm_monotone_penalty", + "output": "lightgbm monotone penalty config.toml: Forbids any monotone splits on the first x (rounded down) level(s) of the tree.The penalty applied to monotone splits on a given depth is a continuous,increasing function of the penalization parameter.https://lightgbm.readthedocs.io/en/latest/Parameters.html#monotone_penalty " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting lightgbm_monotone_penalty", + "output": "lightgbm monotone penalty config.toml: LightGBM Monotone Penalty: Forbids any monotone splits on the first x (rounded down) level(s) of the tree.The penalty applied to monotone splits on a given depth is a continuous,increasing function of the penalization parameter.https://lightgbm.readthedocs.io/en/latest/Parameters.html#monotone_penalty " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_lightgbm_cuda_support do? : enable lightgbm cuda support config.toml: Whether to enable LightGBM CUDA implementation instead of OpenCL. CUDA with LightGBM only supported for Pascal+ (compute capability >=6.0)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_lightgbm_cuda_support. : enable lightgbm cuda support config.toml: Whether to enable LightGBM CUDA implementation instead of OpenCL. CUDA with LightGBM only supported for Pascal+ (compute capability >=6.0)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: LightGBM CUDA support: . 
: Set the enable lightgbm cuda support config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_cuda_support", + "output": "enable lightgbm cuda support config.toml: Whether to enable LightGBM CUDA implementation instead of OpenCL. CUDA with LightGBM only supported for Pascal+ (compute capability >=6.0)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_lightgbm_cuda_support", + "output": "enable lightgbm cuda support config.toml: LightGBM CUDA support: Whether to enable LightGBM CUDA implementation instead of OpenCL. CUDA with LightGBM only supported for Pascal+ (compute capability >=6.0)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable lightgbm cuda support", + "output": "enable lightgbm cuda support config.toml: LightGBM CUDA support: Whether to enable LightGBM CUDA implementation instead of OpenCL. CUDA with LightGBM only supported for Pascal+ (compute capability >=6.0)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "LightGBM CUDA support: ", + "output": "enable lightgbm cuda support config.toml: LightGBM CUDA support: Whether to enable LightGBM CUDA implementation instead of OpenCL. CUDA with LightGBM only supported for Pascal+ (compute capability >=6.0)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_lightgbm_cuda_support", + "output": "enable lightgbm cuda support config.toml: Whether to enable LightGBM CUDA implementation instead of OpenCL. 
CUDA with LightGBM only supported for Pascal+ (compute capability >=6.0)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_lightgbm_cuda_support", + "output": "enable lightgbm cuda support config.toml: LightGBM CUDA support: Whether to enable LightGBM CUDA implementation instead of OpenCL. CUDA with LightGBM only supported for Pascal+ (compute capability >=6.0)" + }, + { + "prompt_type": "plain", + "instruction": ": What does show_constant_model do? : show constant model config.toml: Whether to show constant models in iteration panel even when not best model." + }, + { + "prompt_type": "plain", + "instruction": ": Explain show_constant_model. : show constant model config.toml: Whether to show constant models in iteration panel even when not best model." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to show constant models in iteration panel even when not best model: . : Set the show constant model config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_constant_model", + "output": "show constant model config.toml: Whether to show constant models in iteration panel even when not best model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_constant_model", + "output": "show constant model config.toml: Whether to show constant models in iteration panel even when not best model: Whether to show constant models in iteration panel even when not best model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show constant model", + "output": "show constant model config.toml: Whether to show constant models in iteration panel even when not best model: Whether to show constant models in iteration panel even when not best model." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to show constant models in iteration panel even when not best model: ", + "output": "show constant model config.toml: Whether to show constant models in iteration panel even when not best model: Whether to show constant models in iteration panel even when not best model." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting show_constant_model", + "output": "show constant model config.toml: Whether to show constant models in iteration panel even when not best model." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting show_constant_model", + "output": "show constant model config.toml: Whether to show constant models in iteration panel even when not best model: Whether to show constant models in iteration panel even when not best model." + }, + { + "prompt_type": "plain", + "instruction": ": What does xgboost_reg_objectives do? : xgboost reg objectives config.toml: Select objectives allowed for XGBoost. Added to allowed mutations (the default reg:squarederror is in sample list 3 times) Note: logistic, tweedie, gamma, poisson are only valid for targets with positive values. Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). " + }, + { + "prompt_type": "plain", + "instruction": ": Explain xgboost_reg_objectives. : xgboost reg objectives config.toml: Select objectives allowed for XGBoost. Added to allowed mutations (the default reg:squarederror is in sample list 3 times) Note: logistic, tweedie, gamma, poisson are only valid for targets with positive values. 
Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Select XGBoost regression objectives.: . : Set the xgboost reg objectives config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "xgboost_reg_objectives", + "output": "xgboost reg objectives config.toml: Select objectives allowed for XGBoost. Added to allowed mutations (the default reg:squarederror is in sample list 3 times) Note: logistic, tweedie, gamma, poisson are only valid for targets with positive values. Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "xgboost_reg_objectives", + "output": "xgboost reg objectives config.toml: Select XGBoost regression objectives.: Select objectives allowed for XGBoost. Added to allowed mutations (the default reg:squarederror is in sample list 3 times) Note: logistic, tweedie, gamma, poisson are only valid for targets with positive values. Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "xgboost reg objectives", + "output": "xgboost reg objectives config.toml: Select XGBoost regression objectives.: Select objectives allowed for XGBoost. Added to allowed mutations (the default reg:squarederror is in sample list 3 times) Note: logistic, tweedie, gamma, poisson are only valid for targets with positive values. Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Select XGBoost regression objectives.: ", + "output": "xgboost reg objectives config.toml: Select XGBoost regression objectives.: Select objectives allowed for XGBoost. Added to allowed mutations (the default reg:squarederror is in sample list 3 times) Note: logistic, tweedie, gamma, poisson are only valid for targets with positive values. Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting xgboost_reg_objectives", + "output": "xgboost reg objectives config.toml: Select objectives allowed for XGBoost. Added to allowed mutations (the default reg:squarederror is in sample list 3 times) Note: logistic, tweedie, gamma, poisson are only valid for targets with positive values. 
Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting xgboost_reg_objectives", + "output": "xgboost reg objectives config.toml: Select XGBoost regression objectives.: Select objectives allowed for XGBoost. Added to allowed mutations (the default reg:squarederror is in sample list 3 times) Note: logistic, tweedie, gamma, poisson are only valid for targets with positive values. Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). " + }, + { + "prompt_type": "plain", + "instruction": ": What does xgboost_reg_metrics do? : xgboost reg metrics config.toml: Select metrics allowed for XGBoost. Added to allowed mutations (the default rmse and mae are in sample list twice). Note: tweedie, gamma, poisson are only valid for targets with positive values. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain xgboost_reg_metrics. : xgboost reg metrics config.toml: Select metrics allowed for XGBoost. Added to allowed mutations (the default rmse and mae are in sample list twice). Note: tweedie, gamma, poisson are only valid for targets with positive values. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Select XGBoost regression metrics.: . : Set the xgboost reg metrics config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "xgboost_reg_metrics", + "output": "xgboost reg metrics config.toml: Select metrics allowed for XGBoost. 
Added to allowed mutations (the default rmse and mae are in sample list twice). Note: tweedie, gamma, poisson are only valid for targets with positive values. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "xgboost_reg_metrics", + "output": "xgboost reg metrics config.toml: Select XGBoost regression metrics.: Select metrics allowed for XGBoost. Added to allowed mutations (the default rmse and mae are in sample list twice). Note: tweedie, gamma, poisson are only valid for targets with positive values. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "xgboost reg metrics", + "output": "xgboost reg metrics config.toml: Select XGBoost regression metrics.: Select metrics allowed for XGBoost. Added to allowed mutations (the default rmse and mae are in sample list twice). Note: tweedie, gamma, poisson are only valid for targets with positive values. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Select XGBoost regression metrics.: ", + "output": "xgboost reg metrics config.toml: Select XGBoost regression metrics.: Select metrics allowed for XGBoost. Added to allowed mutations (the default rmse and mae are in sample list twice). Note: tweedie, gamma, poisson are only valid for targets with positive values. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting xgboost_reg_metrics", + "output": "xgboost reg metrics config.toml: Select metrics allowed for XGBoost. Added to allowed mutations (the default rmse and mae are in sample list twice). Note: tweedie, gamma, poisson are only valid for targets with positive values. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting xgboost_reg_metrics", + "output": "xgboost reg metrics config.toml: Select XGBoost regression metrics.: Select metrics allowed for XGBoost. Added to allowed mutations (the default rmse and mae are in sample list twice). Note: tweedie, gamma, poisson are only valid for targets with positive values. " + }, + { + "prompt_type": "plain", + "instruction": ": What does xgboost_binary_metrics do? : xgboost binary metrics config.toml: Select which objectives allowed for XGBoost. Added to allowed mutations (all evenly sampled)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain xgboost_binary_metrics. : xgboost binary metrics config.toml: Select which objectives allowed for XGBoost. Added to allowed mutations (all evenly sampled)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Select XGBoost binary metrics.: . : Set the xgboost binary metrics config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "xgboost_binary_metrics", + "output": "xgboost binary metrics config.toml: Select which objectives allowed for XGBoost. Added to allowed mutations (all evenly sampled)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "xgboost_binary_metrics", + "output": "xgboost binary metrics config.toml: Select XGBoost binary metrics.: Select which objectives allowed for XGBoost. Added to allowed mutations (all evenly sampled)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "xgboost binary metrics", + "output": "xgboost binary metrics config.toml: Select XGBoost binary metrics.: Select which objectives allowed for XGBoost. Added to allowed mutations (all evenly sampled)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Select XGBoost binary metrics.: ", + "output": "xgboost binary metrics config.toml: Select XGBoost binary metrics.: Select which objectives allowed for XGBoost. Added to allowed mutations (all evenly sampled)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting xgboost_binary_metrics", + "output": "xgboost binary metrics config.toml: Select which objectives allowed for XGBoost. Added to allowed mutations (all evenly sampled)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting xgboost_binary_metrics", + "output": "xgboost binary metrics config.toml: Select XGBoost binary metrics.: Select which objectives allowed for XGBoost. Added to allowed mutations (all evenly sampled)." + }, + { + "prompt_type": "plain", + "instruction": ": What does lightgbm_reg_objectives do? : lightgbm reg objectives config.toml: Select objectives allowed for LightGBM. Added to allowed mutations (the default mse is in sample list 2 times if selected). \"binary\" refers to logistic regression. Note: If choose quantile/huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for quantile or huber) or fairc (for fair) to LightGBM. Note: mse is same as rmse corresponding to L2 loss. mae is L1 loss. Note: tweedie, gamma, poisson are only valid for targets with positive values. Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). " + }, + { + "prompt_type": "plain", + "instruction": ": Explain lightgbm_reg_objectives. : lightgbm reg objectives config.toml: Select objectives allowed for LightGBM. 
Added to allowed mutations (the default mse is in sample list 2 times if selected). \"binary\" refers to logistic regression. Note: If choose quantile/huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for quantile or huber) or fairc (for fair) to LightGBM. Note: mse is same as rmse corresponding to L2 loss. mae is L1 loss. Note: tweedie, gamma, poisson are only valid for targets with positive values. Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Select LightGBM regression objectives.: . : Set the lightgbm reg objectives config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_reg_objectives", + "output": "lightgbm reg objectives config.toml: Select objectives allowed for LightGBM. Added to allowed mutations (the default mse is in sample list 2 times if selected). \"binary\" refers to logistic regression. Note: If choose quantile/huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for quantile or huber) or fairc (for fair) to LightGBM. Note: mse is same as rmse corresponding to L2 loss. mae is L1 loss. Note: tweedie, gamma, poisson are only valid for targets with positive values. Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_reg_objectives", + "output": "lightgbm reg objectives config.toml: Select LightGBM regression objectives.: Select objectives allowed for LightGBM. Added to allowed mutations (the default mse is in sample list 2 times if selected). \"binary\" refers to logistic regression. Note: If choose quantile/huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for quantile or huber) or fairc (for fair) to LightGBM. Note: mse is same as rmse correponding to L2 loss. mae is L1 loss. Note: tweedie, gamma, poisson are only valid for targets with positive values. Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm reg objectives", + "output": "lightgbm reg objectives config.toml: Select LightGBM regression objectives.: Select objectives allowed for LightGBM. Added to allowed mutations (the default mse is in sample list 2 times if selected). \"binary\" refers to logistic regression. Note: If choose quantile/huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for quantile or huber) or fairc (for fair) to LightGBM. Note: mse is same as rmse correponding to L2 loss. mae is L1 loss. Note: tweedie, gamma, poisson are only valid for targets with positive values. 
Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Select LightGBM regression objectives.: ", + "output": "lightgbm reg objectives config.toml: Select LightGBM regression objectives.: Select objectives allowed for LightGBM. Added to allowed mutations (the default mse is in sample list 2 times if selected). \"binary\" refers to logistic regression. Note: If choose quantile/huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for quantile or huber) or fairc (for fair) to LightGBM. Note: mse is same as rmse corresponding to L2 loss. mae is L1 loss. Note: tweedie, gamma, poisson are only valid for targets with positive values. Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting lightgbm_reg_objectives", + "output": "lightgbm reg objectives config.toml: Select objectives allowed for LightGBM. Added to allowed mutations (the default mse is in sample list 2 times if selected). \"binary\" refers to logistic regression. Note: If choose quantile/huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for quantile or huber) or fairc (for fair) to LightGBM. Note: mse is same as rmse corresponding to L2 loss. mae is L1 loss. Note: tweedie, gamma, poisson are only valid for targets with positive values. 
Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting lightgbm_reg_objectives", + "output": "lightgbm reg objectives config.toml: Select LightGBM regression objectives.: Select objectives allowed for LightGBM. Added to allowed mutations (the default mse is in sample list 2 times if selected). \"binary\" refers to logistic regression. Note: If choose quantile/huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for quantile or huber) or fairc (for fair) to LightGBM. Note: mse is same as rmse corresponding to L2 loss. mae is L1 loss. Note: tweedie, gamma, poisson are only valid for targets with positive values. Note: The objective relates to the form of the (regularized) loss function, used to determine the split with maximum information gain, while the metric is the non-regularized metric measured on the validation set (external or internally generated by DAI). " + }, + { + "prompt_type": "plain", + "instruction": ": What does lightgbm_reg_metrics do? : lightgbm reg metrics config.toml: Select metrics allowed for LightGBM. Added to allowed mutations (the default rmse is in sample list three times if selected). Note: If choose huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for huber or quantile) or fairc (for fair) to LightGBM. Note: tweedie, gamma, poisson are only valid for targets with positive values. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain lightgbm_reg_metrics. : lightgbm reg metrics config.toml: Select metrics allowed for LightGBM. 
Added to allowed mutations (the default rmse is in sample list three times if selected). Note: If choose huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for huber or quantile) or fairc (for fair) to LightGBM. Note: tweedie, gamma, poisson are only valid for targets with positive values. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Select LightGBM regression metrics.: . : Set the lightgbm reg metrics config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_reg_metrics", + "output": "lightgbm reg metrics config.toml: Select metrics allowed for LightGBM. Added to allowed mutations (the default rmse is in sample list three times if selected). Note: If choose huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for huber or quantile) or fairc (for fair) to LightGBM. Note: tweedie, gamma, poisson are only valid for targets with positive values. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_reg_metrics", + "output": "lightgbm reg metrics config.toml: Select LightGBM regression metrics.: Select metrics allowed for LightGBM. Added to allowed mutations (the default rmse is in sample list three times if selected). Note: If choose huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for huber or quantile) or fairc (for fair) to LightGBM. Note: tweedie, gamma, poisson are only valid for targets with positive values. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm reg metrics", + "output": "lightgbm reg metrics config.toml: Select LightGBM regression metrics.: Select metrics allowed for LightGBM. Added to allowed mutations (the default rmse is in sample list three times if selected). Note: If choose huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for huber or quantile) or fairc (for fair) to LightGBM. Note: tweedie, gamma, poisson are only valid for targets with positive values. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Select LightGBM regression metrics.: ", + "output": "lightgbm reg metrics config.toml: Select LightGBM regression metrics.: Select metrics allowed for LightGBM. Added to allowed mutations (the default rmse is in sample list three times if selected). Note: If choose huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for huber or quantile) or fairc (for fair) to LightGBM. Note: tweedie, gamma, poisson are only valid for targets with positive values. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting lightgbm_reg_metrics", + "output": "lightgbm reg metrics config.toml: Select metrics allowed for LightGBM. Added to allowed mutations (the default rmse is in sample list three times if selected). Note: If choose huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for huber or quantile) or fairc (for fair) to LightGBM. Note: tweedie, gamma, poisson are only valid for targets with positive values. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting lightgbm_reg_metrics", + "output": "lightgbm reg metrics config.toml: Select LightGBM regression metrics.: Select metrics allowed for LightGBM. Added to allowed mutations (the default rmse is in sample list three times if selected). Note: If choose huber or fair and data is not normalized, recommendation is to use params_lightgbm to specify reasonable value of alpha (for huber or quantile) or fairc (for fair) to LightGBM. Note: tweedie, gamma, poisson are only valid for targets with positive values. " + }, + { + "prompt_type": "plain", + "instruction": ": What does lightgbm_binary_objectives do? : lightgbm binary objectives config.toml: Select objectives allowed for LightGBM. Added to allowed mutations (the default binary is in sample list 2 times if selected)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain lightgbm_binary_objectives. : lightgbm binary objectives config.toml: Select objectives allowed for LightGBM. Added to allowed mutations (the default binary is in sample list 2 times if selected)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Select LightGBM binary objectives.: . : Set the lightgbm binary objectives config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_binary_objectives", + "output": "lightgbm binary objectives config.toml: Select objectives allowed for LightGBM. Added to allowed mutations (the default binary is in sample list 2 times if selected)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_binary_objectives", + "output": "lightgbm binary objectives config.toml: Select LightGBM binary objectives.: Select objectives allowed for LightGBM. 
Added to allowed mutations (the default binary is in sample list 2 times if selected)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm binary objectives", + "output": "lightgbm binary objectives config.toml: Select LightGBM binary objectives.: Select objectives allowed for LightGBM. Added to allowed mutations (the default binary is in sample list 2 times if selected)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Select LightGBM binary objectives.: ", + "output": "lightgbm binary objectives config.toml: Select LightGBM binary objectives.: Select objectives allowed for LightGBM. Added to allowed mutations (the default binary is in sample list 2 times if selected)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting lightgbm_binary_objectives", + "output": "lightgbm binary objectives config.toml: Select objectives allowed for LightGBM. Added to allowed mutations (the default binary is in sample list 2 times if selected)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting lightgbm_binary_objectives", + "output": "lightgbm binary objectives config.toml: Select LightGBM binary objectives.: Select objectives allowed for LightGBM. Added to allowed mutations (the default binary is in sample list 2 times if selected)" + }, + { + "prompt_type": "plain", + "instruction": ": What does lightgbm_binary_metrics do? : lightgbm binary metrics config.toml: Select which binary metrics allowed for LightGBM. Added to allowed mutations (all evenly sampled)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain lightgbm_binary_metrics. : lightgbm binary metrics config.toml: Select which binary metrics allowed for LightGBM. Added to allowed mutations (all evenly sampled)." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Select LightGBM binary metrics.: . : Set the lightgbm binary metrics config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_binary_metrics", + "output": "lightgbm binary metrics config.toml: Select which binary metrics allowed for LightGBM. Added to allowed mutations (all evenly sampled)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_binary_metrics", + "output": "lightgbm binary metrics config.toml: Select LightGBM binary metrics.: Select which binary metrics allowed for LightGBM. Added to allowed mutations (all evenly sampled)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm binary metrics", + "output": "lightgbm binary metrics config.toml: Select LightGBM binary metrics.: Select which binary metrics allowed for LightGBM. Added to allowed mutations (all evenly sampled)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Select LightGBM binary metrics.: ", + "output": "lightgbm binary metrics config.toml: Select LightGBM binary metrics.: Select which binary metrics allowed for LightGBM. Added to allowed mutations (all evenly sampled)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting lightgbm_binary_metrics", + "output": "lightgbm binary metrics config.toml: Select which binary metrics allowed for LightGBM. Added to allowed mutations (all evenly sampled)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting lightgbm_binary_metrics", + "output": "lightgbm binary metrics config.toml: Select LightGBM binary metrics.: Select which binary metrics allowed for LightGBM. Added to allowed mutations (all evenly sampled)." + }, + { + "prompt_type": "plain", + "instruction": ": What does lightgbm_multi_metrics do? : lightgbm multi metrics config.toml: Select which metrics allowed for multiclass LightGBM. Added to allowed mutations (evenly sampled if selected)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain lightgbm_multi_metrics. : lightgbm multi metrics config.toml: Select which metrics allowed for multiclass LightGBM. Added to allowed mutations (evenly sampled if selected)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Select LightGBM multiclass metrics.: . : Set the lightgbm multi metrics config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_multi_metrics", + "output": "lightgbm multi metrics config.toml: Select which metrics allowed for multiclass LightGBM. Added to allowed mutations (evenly sampled if selected)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_multi_metrics", + "output": "lightgbm multi metrics config.toml: Select LightGBM multiclass metrics.: Select which metrics allowed for multiclass LightGBM. Added to allowed mutations (evenly sampled if selected)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm multi metrics", + "output": "lightgbm multi metrics config.toml: Select LightGBM multiclass metrics.: Select which metrics allowed for multiclass LightGBM. Added to allowed mutations (evenly sampled if selected)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Select LightGBM multiclass metrics.: ", + "output": "lightgbm multi metrics config.toml: Select LightGBM multiclass metrics.: Select which metrics allowed for multiclass LightGBM. Added to allowed mutations (evenly sampled if selected)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting lightgbm_multi_metrics", + "output": "lightgbm multi metrics config.toml: Select which metrics allowed for multiclass LightGBM. Added to allowed mutations (evenly sampled if selected)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting lightgbm_multi_metrics", + "output": "lightgbm multi metrics config.toml: Select LightGBM multiclass metrics.: Select which metrics allowed for multiclass LightGBM. Added to allowed mutations (evenly sampled if selected)." + }, + { + "prompt_type": "plain", + "instruction": ": What does tweedie_variance_power_list do? : tweedie variance power list config.toml: tweedie_variance_power parameters to try for XGBoostModel and LightGBMModel if tweedie is used. First value is default." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tweedie_variance_power_list. : tweedie variance power list config.toml: tweedie_variance_power parameters to try for XGBoostModel and LightGBMModel if tweedie is used. First value is default." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: tweedie_variance_power parameters: . : Set the tweedie variance power list config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tweedie_variance_power_list", + "output": "tweedie variance power list config.toml: tweedie_variance_power parameters to try for XGBoostModel and LightGBMModel if tweedie is used. First value is default." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tweedie_variance_power_list", + "output": "tweedie variance power list config.toml: tweedie_variance_power parameters: tweedie_variance_power parameters to try for XGBoostModel and LightGBMModel if tweedie is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tweedie variance power list", + "output": "tweedie variance power list config.toml: tweedie_variance_power parameters: tweedie_variance_power parameters to try for XGBoostModel and LightGBMModel if tweedie is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tweedie_variance_power parameters: ", + "output": "tweedie variance power list config.toml: tweedie_variance_power parameters: tweedie_variance_power parameters to try for XGBoostModel and LightGBMModel if tweedie is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tweedie_variance_power_list", + "output": "tweedie variance power list config.toml: tweedie_variance_power parameters to try for XGBoostModel and LightGBMModel if tweedie is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tweedie_variance_power_list", + "output": "tweedie variance power list config.toml: tweedie_variance_power parameters: tweedie_variance_power parameters to try for XGBoostModel and LightGBMModel if tweedie is used. First value is default." + }, + { + "prompt_type": "plain", + "instruction": ": What does huber_alpha_list do? : huber alpha list config.toml: huber parameters to try for LightGBMModel if huber is used. First value is default." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain huber_alpha_list. : huber alpha list config.toml: huber parameters to try for LightGBMModel if huber is used. First value is default." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: huber parameters: . : Set the huber alpha list config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "huber_alpha_list", + "output": "huber alpha list config.toml: huber parameters to try for LightGBMModel if huber is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "huber_alpha_list", + "output": "huber alpha list config.toml: huber parameters: huber parameters to try for LightGBMModel if huber is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "huber alpha list", + "output": "huber alpha list config.toml: huber parameters: huber parameters to try for LightGBMModel if huber is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "huber parameters: ", + "output": "huber alpha list config.toml: huber parameters: huber parameters to try for LightGBMModel if huber is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting huber_alpha_list", + "output": "huber alpha list config.toml: huber parameters to try for LightGBMModel if huber is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting huber_alpha_list", + "output": "huber alpha list config.toml: huber parameters: huber parameters to try for LightGBMModel if huber is used. 
First value is default." + }, + { + "prompt_type": "plain", + "instruction": ": What does fair_c_list do? : fair c list config.toml: fair c parameters to try for LightGBMModel if fair is used. First value is default." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fair_c_list. : fair c list config.toml: fair c parameters to try for LightGBMModel if fair is used. First value is default." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: fair c parameters: . : Set the fair c list config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fair_c_list", + "output": "fair c list config.toml: fair c parameters to try for LightGBMModel if fair is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fair_c_list", + "output": "fair c list config.toml: fair c parameters: fair c parameters to try for LightGBMModel if fair is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fair c list", + "output": "fair c list config.toml: fair c parameters: fair c parameters to try for LightGBMModel if fair is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fair c parameters: ", + "output": "fair c list config.toml: fair c parameters: fair c parameters to try for LightGBMModel if fair is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fair_c_list", + "output": "fair c list config.toml: fair c parameters to try for LightGBMModel if fair is used. First value is default." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fair_c_list", + "output": "fair c list config.toml: fair c parameters: fair c parameters to try for LightGBMModel if fair is used. First value is default." + }, + { + "prompt_type": "plain", + "instruction": ": What does poisson_max_delta_step_list do? : poisson max delta step list config.toml: poisson max_delta_step parameters to try for LightGBMModel if poisson is used. First value is default." + }, + { + "prompt_type": "plain", + "instruction": ": Explain poisson_max_delta_step_list. : poisson max delta step list config.toml: poisson max_delta_step parameters to try for LightGBMModel if poisson is used. First value is default." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: poisson_max_delta_step parameters: . : Set the poisson max delta step list config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "poisson_max_delta_step_list", + "output": "poisson max delta step list config.toml: poisson max_delta_step parameters to try for LightGBMModel if poisson is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "poisson_max_delta_step_list", + "output": "poisson max delta step list config.toml: poisson_max_delta_step parameters: poisson max_delta_step parameters to try for LightGBMModel if poisson is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "poisson max delta step list", + "output": "poisson max delta step list config.toml: poisson_max_delta_step parameters: poisson max_delta_step parameters to try for LightGBMModel if poisson is used. First value is default." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "poisson_max_delta_step parameters: ", + "output": "poisson max delta step list config.toml: poisson_max_delta_step parameters: poisson max_delta_step parameters to try for LightGBMModel if poisson is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting poisson_max_delta_step_list", + "output": "poisson max delta step list config.toml: poisson max_delta_step parameters to try for LightGBMModel if poisson is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting poisson_max_delta_step_list", + "output": "poisson max delta step list config.toml: poisson_max_delta_step parameters: poisson max_delta_step parameters to try for LightGBMModel if poisson is used. First value is default." + }, + { + "prompt_type": "plain", + "instruction": ": What does quantile_alpha do? : quantile alpha config.toml: quantile alpha parameters to try for LightGBMModel if quantile is used. First value is default." + }, + { + "prompt_type": "plain", + "instruction": ": Explain quantile_alpha. : quantile alpha config.toml: quantile alpha parameters to try for LightGBMModel if quantile is used. First value is default." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: quantile alpha parameters: . : Set the quantile alpha config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "quantile_alpha", + "output": "quantile alpha config.toml: quantile alpha parameters to try for LightGBMModel if quantile is used. First value is default." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "quantile_alpha", + "output": "quantile alpha config.toml: quantile alpha parameters: quantile alpha parameters to try for LightGBMModel if quantile is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "quantile alpha", + "output": "quantile alpha config.toml: quantile alpha parameters: quantile alpha parameters to try for LightGBMModel if quantile is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "quantile alpha parameters: ", + "output": "quantile alpha config.toml: quantile alpha parameters: quantile alpha parameters to try for LightGBMModel if quantile is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting quantile_alpha", + "output": "quantile alpha config.toml: quantile alpha parameters to try for LightGBMModel if quantile is used. First value is default." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting quantile_alpha", + "output": "quantile alpha config.toml: quantile alpha parameters: quantile alpha parameters to try for LightGBMModel if quantile is used. First value is default." + }, + { + "prompt_type": "plain", + "instruction": ": What does reg_lambda_glm_default do? : reg lambda glm default config.toml: Default reg_lambda regularization for GLM." + }, + { + "prompt_type": "plain", + "instruction": ": Explain reg_lambda_glm_default. : reg lambda glm default config.toml: Default reg_lambda regularization for GLM." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: default reg_lambda regularization parameter: . 
: Set the reg lambda glm default config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "reg_lambda_glm_default", + "output": "reg lambda glm default config.toml: Default reg_lambda regularization for GLM." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "reg_lambda_glm_default", + "output": "reg lambda glm default config.toml: default reg_lambda regularization parameter: Default reg_lambda regularization for GLM." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "reg lambda glm default", + "output": "reg lambda glm default config.toml: default reg_lambda regularization parameter: Default reg_lambda regularization for GLM." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default reg_lambda regularization parameter: ", + "output": "reg lambda glm default config.toml: default reg_lambda regularization parameter: Default reg_lambda regularization for GLM." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting reg_lambda_glm_default", + "output": "reg lambda glm default config.toml: Default reg_lambda regularization for GLM." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting reg_lambda_glm_default", + "output": "reg lambda glm default config.toml: default reg_lambda regularization parameter: Default reg_lambda regularization for GLM." + }, + { + "prompt_type": "plain", + "instruction": ": What does lossguide_drop_factor do? : lossguide drop factor config.toml: Factor by which to drop max_leaves from effective max_depth value when doing loss_guide. E.g. 
if max_depth is normally 12, this makes leaves 2**11 not 2**12: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain lossguide_drop_factor. : lossguide drop factor config.toml: Factor by which to drop max_leaves from effective max_depth value when doing loss_guide. E.g. if max_depth is normally 12, this makes leaves 2**11 not 2**12: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lossguide_drop_factor", + "output": "lossguide drop factor config.toml: Factor by which to drop max_leaves from effective max_depth value when doing loss_guide. E.g. if max_depth is normally 12, this makes leaves 2**11 not 2**12: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lossguide_drop_factor", + "output": "lossguide drop factor config.toml: Factor by which to drop max_leaves from effective max_depth value when doing loss_guide. E.g. if max_depth is normally 12, this makes leaves 2**11 not 2**12: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lossguide drop factor", + "output": "lossguide drop factor config.toml: Factor by which to drop max_leaves from effective max_depth value when doing loss_guide. E.g. if max_depth is normally 12, this makes leaves 2**11 not 2**12: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Factor by which to drop max_leaves from effective max_depth value when doing loss_guide. E.g. if max_depth is normally 12, this makes leaves 2**11 not 2**12: ", + "output": "lossguide drop factor config.toml: Factor by which to drop max_leaves from effective max_depth value when doing loss_guide. E.g. 
if max_depth is normally 12, this makes leaves 2**11 not 2**12: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting lossguide_drop_factor", + "output": "lossguide drop factor config.toml: Factor by which to drop max_leaves from effective max_depth value when doing loss_guide. E.g. if max_depth is normally 12, this makes leaves 2**11 not 2**12: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting lossguide_drop_factor", + "output": "lossguide drop factor config.toml: Factor by which to drop max_leaves from effective max_depth value when doing loss_guide. E.g. if max_depth is normally 12, this makes leaves 2**11 not 2**12: " + }, + { + "prompt_type": "plain", + "instruction": ": What does lossguide_max_depth_extend_factor do? : lossguide max depth extend factor config.toml: Factor by which to extend max_depth mutations when doing loss_guide. E.g. if max_leaves ends up as x let max_depth be factor * x.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain lossguide_max_depth_extend_factor. : lossguide max depth extend factor config.toml: Factor by which to extend max_depth mutations when doing loss_guide. E.g. if max_leaves ends up as x let max_depth be factor * x.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lossguide_max_depth_extend_factor", + "output": "lossguide max depth extend factor config.toml: Factor by which to extend max_depth mutations when doing loss_guide. E.g. if max_leaves ends up as x let max_depth be factor * x.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lossguide_max_depth_extend_factor", + "output": "lossguide max depth extend factor config.toml: Factor by which to extend max_depth mutations when doing loss_guide. E.g. 
if max_leaves ends up as x let max_depth be factor * x.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lossguide max depth extend factor", + "output": "lossguide max depth extend factor config.toml: Factor by which to extend max_depth mutations when doing loss_guide. E.g. if max_leaves ends up as x let max_depth be factor * x.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Factor by which to extend max_depth mutations when doing loss_guide. E.g. if max_leaves ends up as x let max_depth be factor * x.: ", + "output": "lossguide max depth extend factor config.toml: Factor by which to extend max_depth mutations when doing loss_guide. E.g. if max_leaves ends up as x let max_depth be factor * x.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting lossguide_max_depth_extend_factor", + "output": "lossguide max depth extend factor config.toml: Factor by which to extend max_depth mutations when doing loss_guide. E.g. if max_leaves ends up as x let max_depth be factor * x.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting lossguide_max_depth_extend_factor", + "output": "lossguide max depth extend factor config.toml: Factor by which to extend max_depth mutations when doing loss_guide. E.g. if max_leaves ends up as x let max_depth be factor * x.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does params_lightgbm do? : params lightgbm config.toml: Parameters for LightGBM to override DAI parameters e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_lightgbm=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. 
``params_lightgbm=\"{'n_estimators': 600, 'learning_rate': 0.1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'binary', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: 'objective': 'binary', unless one really knows what one is doing (e.g. alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_lightgbm. : params lightgbm config.toml: Parameters for LightGBM to override DAI parameters e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_lightgbm=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. 
``params_lightgbm=\"{'n_estimators': 600, 'learning_rate': 0.1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'binary', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: 'objective': 'binary', unless one really knows what one is doing (e.g. alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_lightgbm", + "output": "params lightgbm config.toml: Parameters for LightGBM to override DAI parameters e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_lightgbm=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. 
``params_lightgbm=\"{'n_estimators': 600, 'learning_rate': 0.1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'binary', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: 'objective': 'binary', unless one really knows what one is doing (e.g. alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_lightgbm", + "output": "params lightgbm config.toml: Parameters for LightGBM to override DAI parameters e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_lightgbm=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. 
``params_lightgbm=\"{'n_estimators': 600, 'learning_rate': 0.1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'binary', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: 'objective': 'binary', unless one really knows what one is doing (e.g. alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params lightgbm", + "output": "params lightgbm config.toml: Parameters for LightGBM to override DAI parameters e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_lightgbm=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. 
``params_lightgbm=\"{'n_estimators': 600, 'learning_rate': 0.1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'binary', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: 'objective': 'binary', unless one really knows what one is doing (e.g. alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params lightgbm config.toml: Parameters for LightGBM to override DAI parameters e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_lightgbm=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. 
``params_lightgbm=\"{'n_estimators': 600, 'learning_rate': 0.1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'binary', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: 'objective': 'binary', unless one really knows what one is doing (e.g. alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_lightgbm", + "output": "params lightgbm config.toml: Parameters for LightGBM to override DAI parameters e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_lightgbm=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. 
``params_lightgbm=\"{'n_estimators': 600, 'learning_rate': 0.1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'binary', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: 'objective': 'binary', unless one really knows what one is doing (e.g. alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_lightgbm", + "output": "params lightgbm config.toml: Parameters for LightGBM to override DAI parameters e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_lightgbm=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. 
``params_lightgbm=\"{'n_estimators': 600, 'learning_rate': 0.1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'binary', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: 'objective': 'binary', unless one really knows what one is doing (e.g. alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "plain", + "instruction": ": What does params_xgboost do? : params xgboost config.toml: Parameters for XGBoost to override DAI parameters similar parameters as LightGBM since LightGBM parameters are transcribed from XGBoost equivalent versions e.g. ``params_xgboost=\"{'n_estimators': 100, 'max_leaves': 64, 'max_depth': 0, 'random_state': 1234}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_xgboost. : params xgboost config.toml: Parameters for XGBoost to override DAI parameters similar parameters as LightGBM since LightGBM parameters are transcribed from XGBoost equivalent versions e.g. 
``params_xgboost=\"{'n_estimators': 100, 'max_leaves': 64, 'max_depth': 0, 'random_state': 1234}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_xgboost", + "output": "params xgboost config.toml: Parameters for XGBoost to override DAI parameters similar parameters as LightGBM since LightGBM parameters are transcribed from XGBoost equivalent versions e.g. ``params_xgboost=\"{'n_estimators': 100, 'max_leaves': 64, 'max_depth': 0, 'random_state': 1234}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_xgboost", + "output": "params xgboost config.toml: Parameters for XGBoost to override DAI parameters similar parameters as LightGBM since LightGBM parameters are transcribed from XGBoost equivalent versions e.g. ``params_xgboost=\"{'n_estimators': 100, 'max_leaves': 64, 'max_depth': 0, 'random_state': 1234}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params xgboost", + "output": "params xgboost config.toml: Parameters for XGBoost to override DAI parameters similar parameters as LightGBM since LightGBM parameters are transcribed from XGBoost equivalent versions e.g. 
``params_xgboost=\"{'n_estimators': 100, 'max_leaves': 64, 'max_depth': 0, 'random_state': 1234}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params xgboost config.toml: Parameters for XGBoost to override DAI parameters similar parameters as LightGBM since LightGBM parameters are transcribed from XGBoost equivalent versions e.g. ``params_xgboost=\"{'n_estimators': 100, 'max_leaves': 64, 'max_depth': 0, 'random_state': 1234}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_xgboost", + "output": "params xgboost config.toml: Parameters for XGBoost to override DAI parameters similar parameters as LightGBM since LightGBM parameters are transcribed from XGBoost equivalent versions e.g. ``params_xgboost=\"{'n_estimators': 100, 'max_leaves': 64, 'max_depth': 0, 'random_state': 1234}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_xgboost", + "output": "params xgboost config.toml: Parameters for XGBoost to override DAI parameters similar parameters as LightGBM since LightGBM parameters are transcribed from XGBoost equivalent versions e.g. ``params_xgboost=\"{'n_estimators': 100, 'max_leaves': 64, 'max_depth': 0, 'random_state': 1234}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "plain", + "instruction": ": What does params_xgboost_rf do? : params xgboost rf config.toml: Like params_xgboost but for XGBoost random forest." + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_xgboost_rf. : params xgboost rf config.toml: Like params_xgboost but for XGBoost random forest." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_xgboost_rf", + "output": "params xgboost rf config.toml: Like params_xgboost but for XGBoost random forest." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_xgboost_rf", + "output": "params xgboost rf config.toml: Like params_xgboost but for XGBoost random forest." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params xgboost rf", + "output": "params xgboost rf config.toml: Like params_xgboost but for XGBoost random forest." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params xgboost rf config.toml: Like params_xgboost but for XGBoost random forest." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_xgboost_rf", + "output": "params xgboost rf config.toml: Like params_xgboost but for XGBoost random forest." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_xgboost_rf", + "output": "params xgboost rf config.toml: Like params_xgboost but for XGBoost random forest." + }, + { + "prompt_type": "plain", + "instruction": ": What does params_dart do? : params dart config.toml: Like params_xgboost but for XGBoost's dart method" + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_dart. 
: params dart config.toml: Like params_xgboost but for XGBoost's dart method" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_dart", + "output": "params dart config.toml: Like params_xgboost but for XGBoost's dart method" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_dart", + "output": "params dart config.toml: Like params_xgboost but for XGBoost's dart method" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params dart", + "output": "params dart config.toml: Like params_xgboost but for XGBoost's dart method" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params dart config.toml: Like params_xgboost but for XGBoost's dart method" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_dart", + "output": "params dart config.toml: Like params_xgboost but for XGBoost's dart method" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_dart", + "output": "params dart config.toml: Like params_xgboost but for XGBoost's dart method" + }, + { + "prompt_type": "plain", + "instruction": ": What does params_tensorflow do? : params tensorflow config.toml: Parameters for TensorFlow to override DAI parameterse.g. ``params_tensorflow=\"{'lr': 0.01, 'add_wide': False, 'add_attention': True, 'epochs': 30, 'layers': (100, 100), 'activation': 'selu', 'batch_size': 64, 'chunk_size': 1000, 'dropout': 0.3, 'strategy': '1cycle', 'l1': 0.0, 'l2': 0.0, 'ort_loss': 0.5, 'ort_loss_tau': 0.01, 'normalize_type': 'streaming'}\"``See: https://keras.io/ , e.g. 
for activations: https://keras.io/activations/Example layers: ``(500, 500, 500), (100, 100, 100), (100, 100), (50, 50)``Strategies: ``'1cycle'`` or ``'one_shot'``, See: https://github.com/fastai/fastai'one_shot\" is not allowed for ensembles.normalize_type: 'streaming' or 'global' (using sklearn StandardScaler) " + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_tensorflow. : params tensorflow config.toml: Parameters for TensorFlow to override DAI parameterse.g. ``params_tensorflow=\"{'lr': 0.01, 'add_wide': False, 'add_attention': True, 'epochs': 30, 'layers': (100, 100), 'activation': 'selu', 'batch_size': 64, 'chunk_size': 1000, 'dropout': 0.3, 'strategy': '1cycle', 'l1': 0.0, 'l2': 0.0, 'ort_loss': 0.5, 'ort_loss_tau': 0.01, 'normalize_type': 'streaming'}\"``See: https://keras.io/ , e.g. for activations: https://keras.io/activations/Example layers: ``(500, 500, 500), (100, 100, 100), (100, 100), (50, 50)``Strategies: ``'1cycle'`` or ``'one_shot'``, See: https://github.com/fastai/fastai'one_shot\" is not allowed for ensembles.normalize_type: 'streaming' or 'global' (using sklearn StandardScaler) " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Parameters for TensorFlow: . : Set the params tensorflow config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tensorflow", + "output": "params tensorflow config.toml: Parameters for TensorFlow to override DAI parameterse.g. ``params_tensorflow=\"{'lr': 0.01, 'add_wide': False, 'add_attention': True, 'epochs': 30, 'layers': (100, 100), 'activation': 'selu', 'batch_size': 64, 'chunk_size': 1000, 'dropout': 0.3, 'strategy': '1cycle', 'l1': 0.0, 'l2': 0.0, 'ort_loss': 0.5, 'ort_loss_tau': 0.01, 'normalize_type': 'streaming'}\"``See: https://keras.io/ , e.g. 
for activations: https://keras.io/activations/Example layers: ``(500, 500, 500), (100, 100, 100), (100, 100), (50, 50)``Strategies: ``'1cycle'`` or ``'one_shot'``, See: https://github.com/fastai/fastai'one_shot\" is not allowed for ensembles.normalize_type: 'streaming' or 'global' (using sklearn StandardScaler) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tensorflow", + "output": "params tensorflow config.toml: Parameters for TensorFlow: Parameters for TensorFlow to override DAI parameterse.g. ``params_tensorflow=\"{'lr': 0.01, 'add_wide': False, 'add_attention': True, 'epochs': 30, 'layers': (100, 100), 'activation': 'selu', 'batch_size': 64, 'chunk_size': 1000, 'dropout': 0.3, 'strategy': '1cycle', 'l1': 0.0, 'l2': 0.0, 'ort_loss': 0.5, 'ort_loss_tau': 0.01, 'normalize_type': 'streaming'}\"``See: https://keras.io/ , e.g. for activations: https://keras.io/activations/Example layers: ``(500, 500, 500), (100, 100, 100), (100, 100), (50, 50)``Strategies: ``'1cycle'`` or ``'one_shot'``, See: https://github.com/fastai/fastai'one_shot\" is not allowed for ensembles.normalize_type: 'streaming' or 'global' (using sklearn StandardScaler) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params tensorflow", + "output": "params tensorflow config.toml: Parameters for TensorFlow: Parameters for TensorFlow to override DAI parameterse.g. ``params_tensorflow=\"{'lr': 0.01, 'add_wide': False, 'add_attention': True, 'epochs': 30, 'layers': (100, 100), 'activation': 'selu', 'batch_size': 64, 'chunk_size': 1000, 'dropout': 0.3, 'strategy': '1cycle', 'l1': 0.0, 'l2': 0.0, 'ort_loss': 0.5, 'ort_loss_tau': 0.01, 'normalize_type': 'streaming'}\"``See: https://keras.io/ , e.g. 
for activations: https://keras.io/activations/Example layers: ``(500, 500, 500), (100, 100, 100), (100, 100), (50, 50)``Strategies: ``'1cycle'`` or ``'one_shot'``, See: https://github.com/fastai/fastai. 'one_shot' is not allowed for ensembles.normalize_type: 'streaming' or 'global' (using sklearn StandardScaler) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Parameters for TensorFlow: ", + "output": "params tensorflow config.toml: Parameters for TensorFlow: Parameters for TensorFlow to override DAI parameterse.g. ``params_tensorflow=\"{'lr': 0.01, 'add_wide': False, 'add_attention': True, 'epochs': 30, 'layers': (100, 100), 'activation': 'selu', 'batch_size': 64, 'chunk_size': 1000, 'dropout': 0.3, 'strategy': '1cycle', 'l1': 0.0, 'l2': 0.0, 'ort_loss': 0.5, 'ort_loss_tau': 0.01, 'normalize_type': 'streaming'}\"``See: https://keras.io/ , e.g. for activations: https://keras.io/activations/Example layers: ``(500, 500, 500), (100, 100, 100), (100, 100), (50, 50)``Strategies: ``'1cycle'`` or ``'one_shot'``, See: https://github.com/fastai/fastai. 'one_shot' is not allowed for ensembles.normalize_type: 'streaming' or 'global' (using sklearn StandardScaler) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_tensorflow", + "output": "params tensorflow config.toml: Parameters for TensorFlow to override DAI parameterse.g. ``params_tensorflow=\"{'lr': 0.01, 'add_wide': False, 'add_attention': True, 'epochs': 30, 'layers': (100, 100), 'activation': 'selu', 'batch_size': 64, 'chunk_size': 1000, 'dropout': 0.3, 'strategy': '1cycle', 'l1': 0.0, 'l2': 0.0, 'ort_loss': 0.5, 'ort_loss_tau': 0.01, 'normalize_type': 'streaming'}\"``See: https://keras.io/ , e.g. 
for activations: https://keras.io/activations/Example layers: ``(500, 500, 500), (100, 100, 100), (100, 100), (50, 50)``Strategies: ``'1cycle'`` or ``'one_shot'``, See: https://github.com/fastai/fastai'one_shot\" is not allowed for ensembles.normalize_type: 'streaming' or 'global' (using sklearn StandardScaler) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_tensorflow", + "output": "params tensorflow config.toml: Parameters for TensorFlow: Parameters for TensorFlow to override DAI parameterse.g. ``params_tensorflow=\"{'lr': 0.01, 'add_wide': False, 'add_attention': True, 'epochs': 30, 'layers': (100, 100), 'activation': 'selu', 'batch_size': 64, 'chunk_size': 1000, 'dropout': 0.3, 'strategy': '1cycle', 'l1': 0.0, 'l2': 0.0, 'ort_loss': 0.5, 'ort_loss_tau': 0.01, 'normalize_type': 'streaming'}\"``See: https://keras.io/ , e.g. for activations: https://keras.io/activations/Example layers: ``(500, 500, 500), (100, 100, 100), (100, 100), (50, 50)``Strategies: ``'1cycle'`` or ``'one_shot'``, See: https://github.com/fastai/fastai'one_shot\" is not allowed for ensembles.normalize_type: 'streaming' or 'global' (using sklearn StandardScaler) " + }, + { + "prompt_type": "plain", + "instruction": ": What does params_gblinear do? : params gblinear config.toml: Parameters for XGBoost's gblinear to override DAI parameters e.g. ``params_gblinear=\"{'n_estimators': 100}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_gblinear. : params gblinear config.toml: Parameters for XGBoost's gblinear to override DAI parameters e.g. 
``params_gblinear=\"{'n_estimators': 100}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_gblinear", + "output": "params gblinear config.toml: Parameters for XGBoost's gblinear to override DAI parameters e.g. ``params_gblinear=\"{'n_estimators': 100}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_gblinear", + "output": "params gblinear config.toml: Parameters for XGBoost's gblinear to override DAI parameters e.g. ``params_gblinear=\"{'n_estimators': 100}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params gblinear", + "output": "params gblinear config.toml: Parameters for XGBoost's gblinear to override DAI parameters e.g. ``params_gblinear=\"{'n_estimators': 100}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params gblinear config.toml: Parameters for XGBoost's gblinear to override DAI parameters e.g. ``params_gblinear=\"{'n_estimators': 100}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_gblinear", + "output": "params gblinear config.toml: Parameters for XGBoost's gblinear to override DAI parameters e.g. 
``params_gblinear=\"{'n_estimators': 100}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_gblinear", + "output": "params gblinear config.toml: Parameters for XGBoost's gblinear to override DAI parameters e.g. ``params_gblinear=\"{'n_estimators': 100}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "plain", + "instruction": ": What does params_decision_tree do? : params decision tree config.toml: Parameters for Decision Tree to override DAI parameters parameters should be given as XGBoost equivalent unless unique LightGBM parameter e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_decision_tree=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. ``params_decision_tree=\"{'n_estimators': 1, 'learning_rate': 1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'logloss', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: ``'objective': 'binary:logistic'``, unless one really knows what one is doing (e.g. 
alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_decision_tree. : params decision tree config.toml: Parameters for Decision Tree to override DAI parameters parameters should be given as XGBoost equivalent unless unique LightGBM parameter e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_decision_tree=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. ``params_decision_tree=\"{'n_estimators': 1, 'learning_rate': 1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'logloss', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: ``'objective': 'binary:logistic'``, unless one really knows what one is doing (e.g. 
alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_decision_tree", + "output": "params decision tree config.toml: Parameters for Decision Tree to override DAI parameters parameters should be given as XGBoost equivalent unless unique LightGBM parameter e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_decision_tree=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. ``params_decision_tree=\"{'n_estimators': 1, 'learning_rate': 1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'logloss', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: ``'objective': 'binary:logistic'``, unless one really knows what one is doing (e.g. 
alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_decision_tree", + "output": "params decision tree config.toml: Parameters for Decision Tree to override DAI parameters parameters should be given as XGBoost equivalent unless unique LightGBM parameter e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_decision_tree=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. ``params_decision_tree=\"{'n_estimators': 1, 'learning_rate': 1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'logloss', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: ``'objective': 'binary:logistic'``, unless one really knows what one is doing (e.g. 
alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params decision tree", + "output": "params decision tree config.toml: Parameters for Decision Tree to override DAI parameters parameters should be given as XGBoost equivalent unless unique LightGBM parameter e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_decision_tree=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. ``params_decision_tree=\"{'n_estimators': 1, 'learning_rate': 1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'logloss', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: ``'objective': 'binary:logistic'``, unless one really knows what one is doing (e.g. 
alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params decision tree config.toml: Parameters for Decision Tree to override DAI parameters parameters should be given as XGBoost equivalent unless unique LightGBM parameter e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_decision_tree=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. ``params_decision_tree=\"{'n_estimators': 1, 'learning_rate': 1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'logloss', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: ``'objective': 'binary:logistic'``, unless one really knows what one is doing (e.g. 
alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_decision_tree", + "output": "params decision tree config.toml: Parameters for Decision Tree to override DAI parameters parameters should be given as XGBoost equivalent unless unique LightGBM parameter e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_decision_tree=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. ``params_decision_tree=\"{'n_estimators': 1, 'learning_rate': 1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'logloss', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: ``'objective': 'binary:logistic'``, unless one really knows what one is doing (e.g. 
alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_decision_tree", + "output": "params decision tree config.toml: Parameters for Decision Tree to override DAI parameters parameters should be given as XGBoost equivalent unless unique LightGBM parameter e.g. ``'eval_metric'`` instead of ``'metric'`` should be used e.g. ``params_decision_tree=\"{'objective': 'binary', 'n_estimators': 100, 'max_leaves': 64, 'random_state': 1234}\"`` e.g. ``params_decision_tree=\"{'n_estimators': 1, 'learning_rate': 1, 'reg_alpha': 0.0, 'reg_lambda': 0.5, 'gamma': 0, 'max_depth': 0, 'max_bin': 128, 'max_leaves': 256, 'scale_pos_weight': 1.0, 'max_delta_step': 3.469919910597877, 'min_child_weight': 1, 'subsample': 0.9, 'colsample_bytree': 0.3, 'tree_method': 'gpu_hist', 'grow_policy': 'lossguide', 'min_data_in_bin': 3, 'min_child_samples': 5, 'early_stopping_rounds': 20, 'num_classes': 2, 'objective': 'binary', 'eval_metric': 'logloss', 'random_state': 987654, 'early_stopping_threshold': 0.01, 'monotonicity_constraints': False, 'silent': True, 'debug_verbose': 0, 'subsample_freq': 1}\"`` avoid including \"system\"-level parameters like ``'n_gpus': 1, 'gpu_id': 0, , 'n_jobs': 1, 'booster': 'lightgbm'`` also likely should avoid parameters like: ``'objective': 'binary:logistic'``, unless one really knows what one is doing (e.g. 
alternative objectives) See: https://xgboost.readthedocs.io/en/latest/parameter.html And see: https://github.com/Microsoft/LightGBM/blob/master/docs/Parameters.rst Can also pass objective parameters if choose (or in case automatically chosen) certain objectives https://lightgbm.readthedocs.io/en/latest/Parameters.html#metric-parameters" + }, + { + "prompt_type": "plain", + "instruction": ": What does params_rulefit do? : params rulefit config.toml: Parameters for Rulefit to override DAI parameters e.g. ``params_rulefit=\"{'max_leaves': 64}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_rulefit. : params rulefit config.toml: Parameters for Rulefit to override DAI parameters e.g. ``params_rulefit=\"{'max_leaves': 64}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_rulefit", + "output": "params rulefit config.toml: Parameters for Rulefit to override DAI parameters e.g. ``params_rulefit=\"{'max_leaves': 64}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_rulefit", + "output": "params rulefit config.toml: Parameters for Rulefit to override DAI parameters e.g. ``params_rulefit=\"{'max_leaves': 64}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params rulefit", + "output": "params rulefit config.toml: Parameters for Rulefit to override DAI parameters e.g. 
``params_rulefit=\"{'max_leaves': 64}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params rulefit config.toml: Parameters for Rulefit to override DAI parameters e.g. ``params_rulefit=\"{'max_leaves': 64}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_rulefit", + "output": "params rulefit config.toml: Parameters for Rulefit to override DAI parameters e.g. ``params_rulefit=\"{'max_leaves': 64}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_rulefit", + "output": "params rulefit config.toml: Parameters for Rulefit to override DAI parameters e.g. ``params_rulefit=\"{'max_leaves': 64}\"`` See: https://xgboost.readthedocs.io/en/latest/parameter.html" + }, + { + "prompt_type": "plain", + "instruction": ": What does params_ftrl do? : params ftrl config.toml: Parameters for FTRL to override DAI parameters" + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_ftrl. 
: params ftrl config.toml: Parameters for FTRL to override DAI parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_ftrl", + "output": "params ftrl config.toml: Parameters for FTRL to override DAI parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_ftrl", + "output": "params ftrl config.toml: Parameters for FTRL to override DAI parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params ftrl", + "output": "params ftrl config.toml: Parameters for FTRL to override DAI parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params ftrl config.toml: Parameters for FTRL to override DAI parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_ftrl", + "output": "params ftrl config.toml: Parameters for FTRL to override DAI parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_ftrl", + "output": "params ftrl config.toml: Parameters for FTRL to override DAI parameters" + }, + { + "prompt_type": "plain", + "instruction": ": What does params_grownet do? : params grownet config.toml: Parameters for GrowNet to override DAI parameters" + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_grownet. 
: params grownet config.toml: Parameters for GrowNet to override DAI parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_grownet", + "output": "params grownet config.toml: Parameters for GrowNet to override DAI parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_grownet", + "output": "params grownet config.toml: Parameters for GrowNet to override DAI parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params grownet", + "output": "params grownet config.toml: Parameters for GrowNet to override DAI parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params grownet config.toml: Parameters for GrowNet to override DAI parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_grownet", + "output": "params grownet config.toml: Parameters for GrowNet to override DAI parameters" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_grownet", + "output": "params grownet config.toml: Parameters for GrowNet to override DAI parameters" + }, + { + "prompt_type": "plain", + "instruction": ": What does params_tune_mode do? : params tune mode config.toml: How to handle tomls like params_tune_lightgbm.override: For any key in the params_tune_ toml dict, use the list of values instead of DAI's list of values.override_and_first_as_default: like override, but also use first entry in tuple/list (if present) as override as replacement for (e.g.) params_lightgbm when using params_tune_lightgbm.exclusive: Only tune the keys in the params_tune_ toml dict, unless no keys are present. 
Otherwise use DAI's default values.exclusive_and_first_as_default: Like exclusive but same first as default behavior as override_and_first_as_default.In order to fully control hyperparameter tuning, either one should set \"override\" mode and include every hyperparameter and at least one value in each list within the dictionary, or choose \"exclusive\" and then rely upon DAI unchanging default values for any keys not given.For custom recipes, one can use recipe_dict to pass hyperparameters and if using the \"get_one()\" function in a custom recipe, and if user_tune passed contains the hyperparameter dictionary equivalent of params_tune_ tomls, then this params_tune_mode will also work for custom recipes." + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_tune_mode. : params tune mode config.toml: How to handle tomls like params_tune_lightgbm.override: For any key in the params_tune_ toml dict, use the list of values instead of DAI's list of values.override_and_first_as_default: like override, but also use first entry in tuple/list (if present) as override as replacement for (e.g.) params_lightgbm when using params_tune_lightgbm.exclusive: Only tune the keys in the params_tune_ toml dict, unless no keys are present. Otherwise use DAI's default values.exclusive_and_first_as_default: Like exclusive but same first as default behavior as override_and_first_as_default.In order to fully control hyperparameter tuning, either one should set \"override\" mode and include every hyperparameter and at least one value in each list within the dictionary, or choose \"exclusive\" and then rely upon DAI unchanging default values for any keys not given.For custom recipes, one can use recipe_dict to pass hyperparameters and if using the \"get_one()\" function in a custom recipe, and if user_tune passed contains the hyperparameter dictionary equivalent of params_tune_ tomls, then this params_tune_mode will also work for custom recipes." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Mode to handle params_tune_ tomls: . : Set the params tune mode config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_mode", + "output": "params tune mode config.toml: How to handle tomls like params_tune_lightgbm.override: For any key in the params_tune_ toml dict, use the list of values instead of DAI's list of values.override_and_first_as_default: like override, but also use first entry in tuple/list (if present) as override as replacement for (e.g.) params_lightgbm when using params_tune_lightgbm.exclusive: Only tune the keys in the params_tune_ toml dict, unless no keys are present. Otherwise use DAI's default values.exclusive_and_first_as_default: Like exclusive but same first as default behavior as override_and_first_as_default.In order to fully control hyperparameter tuning, either one should set \"override\" mode and include every hyperparameter and at least one value in each list within the dictionary, or choose \"exclusive\" and then rely upon DAI unchanging default values for any keys not given.For custom recipes, one can use recipe_dict to pass hyperparameters and if using the \"get_one()\" function in a custom recipe, and if user_tune passed contains the hyperparameter dictionary equivalent of params_tune_ tomls, then this params_tune_mode will also work for custom recipes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_mode", + "output": "params tune mode config.toml: Mode to handle params_tune_ tomls: How to handle tomls like params_tune_lightgbm.override: For any key in the params_tune_ toml dict, use the list of values instead of DAI's list of values.override_and_first_as_default: like override, but also use first entry in tuple/list (if present) as override as replacement for (e.g.) 
params_lightgbm when using params_tune_lightgbm.exclusive: Only tune the keys in the params_tune_ toml dict, unless no keys are present. Otherwise use DAI's default values.exclusive_and_first_as_default: Like exclusive but same first as default behavior as override_and_first_as_default.In order to fully control hyperparameter tuning, either one should set \"override\" mode and include every hyperparameter and at least one value in each list within the dictionary, or choose \"exclusive\" and then rely upon DAI unchanging default values for any keys not given.For custom recipes, one can use recipe_dict to pass hyperparameters and if using the \"get_one()\" function in a custom recipe, and if user_tune passed contains the hyperparameter dictionary equivalent of params_tune_ tomls, then this params_tune_mode will also work for custom recipes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params tune mode", + "output": "params tune mode config.toml: Mode to handle params_tune_ tomls: How to handle tomls like params_tune_lightgbm.override: For any key in the params_tune_ toml dict, use the list of values instead of DAI's list of values.override_and_first_as_default: like override, but also use first entry in tuple/list (if present) as override as replacement for (e.g.) params_lightgbm when using params_tune_lightgbm.exclusive: Only tune the keys in the params_tune_ toml dict, unless no keys are present. 
Otherwise use DAI's default values.exclusive_and_first_as_default: Like exclusive but same first as default behavior as override_and_first_as_default.In order to fully control hyperparameter tuning, either one should set \"override\" mode and include every hyperparameter and at least one value in each list within the dictionary, or choose \"exclusive\" and then rely upon DAI unchanging default values for any keys not given.For custom recipes, one can use recipe_dict to pass hyperparameters and if using the \"get_one()\" function in a custom recipe, and if user_tune passed contains the hyperparameter dictionary equivalent of params_tune_ tomls, then this params_tune_mode will also work for custom recipes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Mode to handle params_tune_ tomls: ", + "output": "params tune mode config.toml: Mode to handle params_tune_ tomls: How to handle tomls like params_tune_lightgbm.override: For any key in the params_tune_ toml dict, use the list of values instead of DAI's list of values.override_and_first_as_default: like override, but also use first entry in tuple/list (if present) as override as replacement for (e.g.) params_lightgbm when using params_tune_lightgbm.exclusive: Only tune the keys in the params_tune_ toml dict, unless no keys are present. 
Otherwise use DAI's default values.exclusive_and_first_as_default: Like exclusive but same first as default behavior as override_and_first_as_default.In order to fully control hyperparameter tuning, either one should set \"override\" mode and include every hyperparameter and at least one value in each list within the dictionary, or choose \"exclusive\" and then rely upon DAI unchanging default values for any keys not given.For custom recipes, one can use recipe_dict to pass hyperparameters and if using the \"get_one()\" function in a custom recipe, and if user_tune passed contains the hyperparameter dictionary equivalent of params_tune_ tomls, then this params_tune_mode will also work for custom recipes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_tune_mode", + "output": "params tune mode config.toml: How to handle tomls like params_tune_lightgbm.override: For any key in the params_tune_ toml dict, use the list of values instead of DAI's list of values.override_and_first_as_default: like override, but also use first entry in tuple/list (if present) as override as replacement for (e.g.) params_lightgbm when using params_tune_lightgbm.exclusive: Only tune the keys in the params_tune_ toml dict, unless no keys are present. 
Otherwise use DAI's default values.exclusive_and_first_as_default: Like exclusive but same first as default behavior as override_and_first_as_default.In order to fully control hyperparameter tuning, either one should set \"override\" mode and include every hyperparameter and at least one value in each list within the dictionary, or choose \"exclusive\" and then rely upon DAI unchanging default values for any keys not given.For custom recipes, one can use recipe_dict to pass hyperparameters and if using the \"get_one()\" function in a custom recipe, and if user_tune passed contains the hyperparameter dictionary equivalent of params_tune_ tomls, then this params_tune_mode will also work for custom recipes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_tune_mode", + "output": "params tune mode config.toml: Mode to handle params_tune_ tomls: How to handle tomls like params_tune_lightgbm.override: For any key in the params_tune_ toml dict, use the list of values instead of DAI's list of values.override_and_first_as_default: like override, but also use first entry in tuple/list (if present) as override as replacement for (e.g.) params_lightgbm when using params_tune_lightgbm.exclusive: Only tune the keys in the params_tune_ toml dict, unless no keys are present. 
Otherwise use DAI's default values.exclusive_and_first_as_default: Like exclusive but same first as default behavior as override_and_first_as_default.In order to fully control hyperparameter tuning, either one should set \"override\" mode and include every hyperparameter and at least one value in each list within the dictionary, or choose \"exclusive\" and then rely upon DAI unchanging default values for any keys not given.For custom recipes, one can use recipe_dict to pass hyperparameters and if using the \"get_one()\" function in a custom recipe, and if user_tune passed contains the hyperparameter dictionary equivalent of params_tune_ tomls, then this params_tune_mode will also work for custom recipes." + }, + { + "prompt_type": "plain", + "instruction": ": What does params_final_auto_adjust do? : params final auto adjust config.toml: Whether to adjust GBM trees, learning rate, and early_stopping_rounds for GBM models or recipes with _is_gbm=True.True: auto mode, that changes trees/LR/stopping if tune_learning_rate=false and early stopping is supported by the model and model is GBM or from custom individual with parameter in adjusted_params.False: disable any adjusting from tuning-evolution into final model.Setting this to false is required if (e.g.) one changes params_lightgbm or params_tune_lightgbm and wanted to preserve the tuning-evolution values into the final model.One should also set tune_learning_rate to true to tune the learning_rate, else it will be fixed to some single value." + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_final_auto_adjust. 
: params final auto adjust config.toml: Whether to adjust GBM trees, learning rate, and early_stopping_rounds for GBM models or recipes with _is_gbm=True.True: auto mode, that changes trees/LR/stopping if tune_learning_rate=false and early stopping is supported by the model and model is GBM or from custom individual with parameter in adjusted_params.False: disable any adjusting from tuning-evolution into final model.Setting this to false is required if (e.g.) one changes params_lightgbm or params_tune_lightgbm and wanted to preserve the tuning-evolution values into the final model.One should also set tune_learning_rate to true to tune the learning_rate, else it will be fixed to some single value." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Adjust trees/LR: . : Set the params final auto adjust config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_final_auto_adjust", + "output": "params final auto adjust config.toml: Whether to adjust GBM trees, learning rate, and early_stopping_rounds for GBM models or recipes with _is_gbm=True.True: auto mode, that changes trees/LR/stopping if tune_learning_rate=false and early stopping is supported by the model and model is GBM or from custom individual with parameter in adjusted_params.False: disable any adjusting from tuning-evolution into final model.Setting this to false is required if (e.g.) one changes params_lightgbm or params_tune_lightgbm and wanted to preserve the tuning-evolution values into the final model.One should also set tune_learning_rate to true to tune the learning_rate, else it will be fixed to some single value." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_final_auto_adjust", + "output": "params final auto adjust config.toml: Adjust trees/LR: Whether to adjust GBM trees, learning rate, and early_stopping_rounds for GBM models or recipes with _is_gbm=True.True: auto mode, that changes trees/LR/stopping if tune_learning_rate=false and early stopping is supported by the model and model is GBM or from custom individual with parameter in adjusted_params.False: disable any adjusting from tuning-evolution into final model.Setting this to false is required if (e.g.) one changes params_lightgbm or params_tune_lightgbm and wanted to preserve the tuning-evolution values into the final model.One should also set tune_learning_rate to true to tune the learning_rate, else it will be fixed to some single value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params final auto adjust", + "output": "params final auto adjust config.toml: Adjust trees/LR: Whether to adjust GBM trees, learning rate, and early_stopping_rounds for GBM models or recipes with _is_gbm=True.True: auto mode, that changes trees/LR/stopping if tune_learning_rate=false and early stopping is supported by the model and model is GBM or from custom individual with parameter in adjusted_params.False: disable any adjusting from tuning-evolution into final model.Setting this to false is required if (e.g.) one changes params_lightgbm or params_tune_lightgbm and wanted to preserve the tuning-evolution values into the final model.One should also set tune_learning_rate to true to tune the learning_rate, else it will be fixed to some single value." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Adjust trees/LR: ", + "output": "params final auto adjust config.toml: Adjust trees/LR: Whether to adjust GBM trees, learning rate, and early_stopping_rounds for GBM models or recipes with _is_gbm=True.True: auto mode, that changes trees/LR/stopping if tune_learning_rate=false and early stopping is supported by the model and model is GBM or from custom individual with parameter in adjusted_params.False: disable any adjusting from tuning-evolution into final model.Setting this to false is required if (e.g.) one changes params_lightgbm or params_tune_lightgbm and wanted to preserve the tuning-evolution values into the final model.One should also set tune_learning_rate to true to tune the learning_rate, else it will be fixed to some single value." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_final_auto_adjust", + "output": "params final auto adjust config.toml: Whether to adjust GBM trees, learning rate, and early_stopping_rounds for GBM models or recipes with _is_gbm=True.True: auto mode, that changes trees/LR/stopping if tune_learning_rate=false and early stopping is supported by the model and model is GBM or from custom individual with parameter in adjusted_params.False: disable any adjusting from tuning-evolution into final model.Setting this to false is required if (e.g.) one changes params_lightgbm or params_tune_lightgbm and wanted to preserve the tuning-evolution values into the final model.One should also set tune_learning_rate to true to tune the learning_rate, else it will be fixed to some single value." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_final_auto_adjust", + "output": "params final auto adjust config.toml: Adjust trees/LR: Whether to adjust GBM trees, learning rate, and early_stopping_rounds for GBM models or recipes with _is_gbm=True.True: auto mode, that changes trees/LR/stopping if tune_learning_rate=false and early stopping is supported by the model and model is GBM or from custom individual with parameter in adjusted_params.False: disable any adjusting from tuning-evolution into final model.Setting this to false is required if (e.g.) one changes params_lightgbm or params_tune_lightgbm and wanted to preserve the tuning-evolution values into the final model.One should also set tune_learning_rate to true to tune the learning_rate, else it will be fixed to some single value." + }, + { + "prompt_type": "plain", + "instruction": ": What does params_tune_lightgbm do? : params tune lightgbm config.toml: Dictionary of key:lists of values to use for LightGBM tuning, overrides DAI's choice per key e.g. ``params_tune_lightgbm=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_tune_lightgbm. : params tune lightgbm config.toml: Dictionary of key:lists of values to use for LightGBM tuning, overrides DAI's choice per key e.g. ``params_tune_lightgbm=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_lightgbm", + "output": "params tune lightgbm config.toml: Dictionary of key:lists of values to use for LightGBM tuning, overrides DAI's choice per key e.g. 
``params_tune_lightgbm=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_lightgbm", + "output": "params tune lightgbm config.toml: Dictionary of key:lists of values to use for LightGBM tuning, overrides DAI's choice per key e.g. ``params_tune_lightgbm=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params tune lightgbm", + "output": "params tune lightgbm config.toml: Dictionary of key:lists of values to use for LightGBM tuning, overrides DAI's choice per key e.g. ``params_tune_lightgbm=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params tune lightgbm config.toml: Dictionary of key:lists of values to use for LightGBM tuning, overrides DAI's choice per key e.g. ``params_tune_lightgbm=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_tune_lightgbm", + "output": "params tune lightgbm config.toml: Dictionary of key:lists of values to use for LightGBM tuning, overrides DAI's choice per key e.g. ``params_tune_lightgbm=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_tune_lightgbm", + "output": "params tune lightgbm config.toml: Dictionary of key:lists of values to use for LightGBM tuning, overrides DAI's choice per key e.g. 
``params_tune_lightgbm=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": What does params_tune_xgboost do? : params tune xgboost config.toml: Like params_tune_lightgbm but for XGBoost e.g. ``params_tune_xgboost=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_tune_xgboost. : params tune xgboost config.toml: Like params_tune_lightgbm but for XGBoost e.g. ``params_tune_xgboost=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_xgboost", + "output": "params tune xgboost config.toml: Like params_tune_lightgbm but for XGBoost e.g. ``params_tune_xgboost=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_xgboost", + "output": "params tune xgboost config.toml: Like params_tune_lightgbm but for XGBoost e.g. ``params_tune_xgboost=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params tune xgboost", + "output": "params tune xgboost config.toml: Like params_tune_lightgbm but for XGBoost e.g. ``params_tune_xgboost=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params tune xgboost config.toml: Like params_tune_lightgbm but for XGBoost e.g. ``params_tune_xgboost=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_tune_xgboost", + "output": "params tune xgboost config.toml: Like params_tune_lightgbm but for XGBoost e.g. 
``params_tune_xgboost=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_tune_xgboost", + "output": "params tune xgboost config.toml: Like params_tune_lightgbm but for XGBoost e.g. ``params_tune_xgboost=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": What does params_tune_xgboost_rf do? : params tune xgboost rf config.toml: Like params_tune_lightgbm but for XGBoost random forest e.g. ``params_tune_xgboost_rf=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_tune_xgboost_rf. : params tune xgboost rf config.toml: Like params_tune_lightgbm but for XGBoost random forest e.g. ``params_tune_xgboost_rf=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_xgboost_rf", + "output": "params tune xgboost rf config.toml: Like params_tune_lightgbm but for XGBoost random forest e.g. ``params_tune_xgboost_rf=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_xgboost_rf", + "output": "params tune xgboost rf config.toml: Like params_tune_lightgbm but for XGBoost random forest e.g. ``params_tune_xgboost_rf=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params tune xgboost rf", + "output": "params tune xgboost rf config.toml: Like params_tune_lightgbm but for XGBoost random forest e.g. 
``params_tune_xgboost_rf=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params tune xgboost rf config.toml: Like params_tune_lightgbm but for XGBoost random forest e.g. ``params_tune_xgboost_rf=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_tune_xgboost_rf", + "output": "params tune xgboost rf config.toml: Like params_tune_lightgbm but for XGBoost random forest e.g. ``params_tune_xgboost_rf=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_tune_xgboost_rf", + "output": "params tune xgboost rf config.toml: Like params_tune_lightgbm but for XGBoost random forest e.g. ``params_tune_xgboost_rf=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": What does params_tune_decision_tree do? : params tune decision tree config.toml: Dictionary of key:lists of values to use for LightGBM Decision Tree tuning, overrides DAI's choice per key e.g. ``params_tune_decision_tree=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_tune_decision_tree. : params tune decision tree config.toml: Dictionary of key:lists of values to use for LightGBM Decision Tree tuning, overrides DAI's choice per key e.g. 
``params_tune_decision_tree=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_decision_tree", + "output": "params tune decision tree config.toml: Dictionary of key:lists of values to use for LightGBM Decision Tree tuning, overrides DAI's choice per key e.g. ``params_tune_decision_tree=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_decision_tree", + "output": "params tune decision tree config.toml: Dictionary of key:lists of values to use for LightGBM Decision Tree tuning, overrides DAI's choice per key e.g. ``params_tune_decision_tree=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params tune decision tree", + "output": "params tune decision tree config.toml: Dictionary of key:lists of values to use for LightGBM Decision Tree tuning, overrides DAI's choice per key e.g. ``params_tune_decision_tree=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params tune decision tree config.toml: Dictionary of key:lists of values to use for LightGBM Decision Tree tuning, overrides DAI's choice per key e.g. 
``params_tune_decision_tree=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_tune_decision_tree", + "output": "params tune decision tree config.toml: Dictionary of key:lists of values to use for LightGBM Decision Tree tuning, overrides DAI's choice per key e.g. ``params_tune_decision_tree=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_tune_decision_tree", + "output": "params tune decision tree config.toml: Dictionary of key:lists of values to use for LightGBM Decision Tree tuning, overrides DAI's choice per key e.g. ``params_tune_decision_tree=\"{'min_child_samples': [1,2,5,100,1000], 'min_data_in_bin': [1,2,3,10,100,1000]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": What does params_tune_dart do? : params tune dart config.toml: Like params_tune_lightgbm but for XGBoost's Dart e.g. ``params_tune_dart=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_tune_dart. : params tune dart config.toml: Like params_tune_lightgbm but for XGBoost's Dart e.g. ``params_tune_dart=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_dart", + "output": "params tune dart config.toml: Like params_tune_lightgbm but for XGBoost's Dart e.g. ``params_tune_dart=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_dart", + "output": "params tune dart config.toml: Like params_tune_lightgbm but for XGBoost's Dart e.g. 
``params_tune_dart=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params tune dart", + "output": "params tune dart config.toml: Like params_tune_lightgbm but for XGBoost's Dart e.g. ``params_tune_dart=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params tune dart config.toml: Like params_tune_lightgbm but for XGBoost's Dart e.g. ``params_tune_dart=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_tune_dart", + "output": "params tune dart config.toml: Like params_tune_lightgbm but for XGBoost's Dart e.g. ``params_tune_dart=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_tune_dart", + "output": "params tune dart config.toml: Like params_tune_lightgbm but for XGBoost's Dart e.g. ``params_tune_dart=\"{'max_leaves': [8, 16, 32, 64]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": What does params_tune_tensorflow do? : params tune tensorflow config.toml: Like params_tune_lightgbm but for TensorFlow e.g. ``params_tune_tensorflow=\"{'layers': [(10,10,10), (10, 10, 10, 10)]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_tune_tensorflow. : params tune tensorflow config.toml: Like params_tune_lightgbm but for TensorFlow e.g. ``params_tune_tensorflow=\"{'layers': [(10,10,10), (10, 10, 10, 10)]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_tensorflow", + "output": "params tune tensorflow config.toml: Like params_tune_lightgbm but for TensorFlow e.g. 
``params_tune_tensorflow=\"{'layers': [(10,10,10), (10, 10, 10, 10)]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_tensorflow", + "output": "params tune tensorflow config.toml: Like params_tune_lightgbm but for TensorFlow e.g. ``params_tune_tensorflow=\"{'layers': [(10,10,10), (10, 10, 10, 10)]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params tune tensorflow", + "output": "params tune tensorflow config.toml: Like params_tune_lightgbm but for TensorFlow e.g. ``params_tune_tensorflow=\"{'layers': [(10,10,10), (10, 10, 10, 10)]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params tune tensorflow config.toml: Like params_tune_lightgbm but for TensorFlow e.g. ``params_tune_tensorflow=\"{'layers': [(10,10,10), (10, 10, 10, 10)]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_tune_tensorflow", + "output": "params tune tensorflow config.toml: Like params_tune_lightgbm but for TensorFlow e.g. ``params_tune_tensorflow=\"{'layers': [(10,10,10), (10, 10, 10, 10)]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_tune_tensorflow", + "output": "params tune tensorflow config.toml: Like params_tune_lightgbm but for TensorFlow e.g. ``params_tune_tensorflow=\"{'layers': [(10,10,10), (10, 10, 10, 10)]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": What does params_tune_gblinear do? : params tune gblinear config.toml: Like params_tune_lightgbm but for gblinear e.g. ``params_tune_gblinear=\"{'reg_lambda': [.01, .001, .0001, .0002]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_tune_gblinear. 
: params tune gblinear config.toml: Like params_tune_lightgbm but for gblinear e.g. ``params_tune_gblinear=\"{'reg_lambda': [.01, .001, .0001, .0002]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_gblinear", + "output": "params tune gblinear config.toml: Like params_tune_lightgbm but for gblinear e.g. ``params_tune_gblinear=\"{'reg_lambda': [.01, .001, .0001, .0002]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_gblinear", + "output": "params tune gblinear config.toml: Like params_tune_lightgbm but for gblinear e.g. ``params_tune_gblinear=\"{'reg_lambda': [.01, .001, .0001, .0002]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params tune gblinear", + "output": "params tune gblinear config.toml: Like params_tune_lightgbm but for gblinear e.g. ``params_tune_gblinear=\"{'reg_lambda': [.01, .001, .0001, .0002]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params tune gblinear config.toml: Like params_tune_lightgbm but for gblinear e.g. ``params_tune_gblinear=\"{'reg_lambda': [.01, .001, .0001, .0002]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_tune_gblinear", + "output": "params tune gblinear config.toml: Like params_tune_lightgbm but for gblinear e.g. ``params_tune_gblinear=\"{'reg_lambda': [.01, .001, .0001, .0002]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_tune_gblinear", + "output": "params tune gblinear config.toml: Like params_tune_lightgbm but for gblinear e.g. 
``params_tune_gblinear=\"{'reg_lambda': [.01, .001, .0001, .0002]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": What does params_tune_rulefit do? : params tune rulefit config.toml: Like params_tune_lightgbm but for rulefit e.g. ``params_tune_rulefit=\"{'max_depth': [4, 5, 6]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_tune_rulefit. : params tune rulefit config.toml: Like params_tune_lightgbm but for rulefit e.g. ``params_tune_rulefit=\"{'max_depth': [4, 5, 6]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_rulefit", + "output": "params tune rulefit config.toml: Like params_tune_lightgbm but for rulefit e.g. ``params_tune_rulefit=\"{'max_depth': [4, 5, 6]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_rulefit", + "output": "params tune rulefit config.toml: Like params_tune_lightgbm but for rulefit e.g. ``params_tune_rulefit=\"{'max_depth': [4, 5, 6]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params tune rulefit", + "output": "params tune rulefit config.toml: Like params_tune_lightgbm but for rulefit e.g. ``params_tune_rulefit=\"{'max_depth': [4, 5, 6]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params tune rulefit config.toml: Like params_tune_lightgbm but for rulefit e.g. ``params_tune_rulefit=\"{'max_depth': [4, 5, 6]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_tune_rulefit", + "output": "params tune rulefit config.toml: Like params_tune_lightgbm but for rulefit e.g. 
``params_tune_rulefit=\"{'max_depth': [4, 5, 6]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_tune_rulefit", + "output": "params tune rulefit config.toml: Like params_tune_lightgbm but for rulefit e.g. ``params_tune_rulefit=\"{'max_depth': [4, 5, 6]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": What does params_tune_ftrl do? : params tune ftrl config.toml: Like params_tune_lightgbm but for ftrl" + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_tune_ftrl. : params tune ftrl config.toml: Like params_tune_lightgbm but for ftrl" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_ftrl", + "output": "params tune ftrl config.toml: Like params_tune_lightgbm but for ftrl" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_ftrl", + "output": "params tune ftrl config.toml: Like params_tune_lightgbm but for ftrl" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params tune ftrl", + "output": "params tune ftrl config.toml: Like params_tune_lightgbm but for ftrl" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params tune ftrl config.toml: Like params_tune_lightgbm but for ftrl" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_tune_ftrl", + "output": "params tune ftrl config.toml: Like params_tune_lightgbm but for ftrl" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_tune_ftrl", + "output": "params tune ftrl config.toml: Like params_tune_lightgbm but for ftrl" + }, + { + "prompt_type": "plain", + 
"instruction": ": What does params_tune_grownet do? : params tune grownet config.toml: Like params_tune_lightgbm but for GrowNet e.g. ``params_tune_grownet=\"{'input_dropout': [0.2, 0.5]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_tune_grownet. : params tune grownet config.toml: Like params_tune_lightgbm but for GrowNet e.g. ``params_tune_grownet=\"{'input_dropout': [0.2, 0.5]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_grownet", + "output": "params tune grownet config.toml: Like params_tune_lightgbm but for GrowNet e.g. ``params_tune_grownet=\"{'input_dropout': [0.2, 0.5]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_grownet", + "output": "params tune grownet config.toml: Like params_tune_lightgbm but for GrowNet e.g. ``params_tune_grownet=\"{'input_dropout': [0.2, 0.5]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params tune grownet", + "output": "params tune grownet config.toml: Like params_tune_lightgbm but for GrowNet e.g. ``params_tune_grownet=\"{'input_dropout': [0.2, 0.5]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params tune grownet config.toml: Like params_tune_lightgbm but for GrowNet e.g. ``params_tune_grownet=\"{'input_dropout': [0.2, 0.5]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_tune_grownet", + "output": "params tune grownet config.toml: Like params_tune_lightgbm but for GrowNet e.g. 
``params_tune_grownet=\"{'input_dropout': [0.2, 0.5]}\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_tune_grownet", + "output": "params tune grownet config.toml: Like params_tune_lightgbm but for GrowNet e.g. ``params_tune_grownet=\"{'input_dropout': [0.2, 0.5]}\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": What does params_tune_grow_policy_simple_trees do? : params tune grow policy simple trees config.toml: Whether to force max_leaves and max_depth to be 0 if grow_policy is depthwise and lossguide, respectively." + }, + { + "prompt_type": "plain", + "instruction": ": Explain params_tune_grow_policy_simple_trees. : params tune grow policy simple trees config.toml: Whether to force max_leaves and max_depth to be 0 if grow_policy is depthwise and lossguide, respectively." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_grow_policy_simple_trees", + "output": "params tune grow policy simple trees config.toml: Whether to force max_leaves and max_depth to be 0 if grow_policy is depthwise and lossguide, respectively." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params_tune_grow_policy_simple_trees", + "output": "params tune grow policy simple trees config.toml: Whether to force max_leaves and max_depth to be 0 if grow_policy is depthwise and lossguide, respectively." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "params tune grow policy simple trees", + "output": "params tune grow policy simple trees config.toml: Whether to force max_leaves and max_depth to be 0 if grow_policy is depthwise and lossguide, respectively." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "params tune grow policy simple trees config.toml: Whether to force max_leaves and max_depth to be 0 if grow_policy is depthwise and lossguide, respectively." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting params_tune_grow_policy_simple_trees", + "output": "params tune grow policy simple trees config.toml: Whether to force max_leaves and max_depth to be 0 if grow_policy is depthwise and lossguide, respectively." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting params_tune_grow_policy_simple_trees", + "output": "params tune grow policy simple trees config.toml: Whether to force max_leaves and max_depth to be 0 if grow_policy is depthwise and lossguide, respectively." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_nestimators do? : max nestimators config.toml: Maximum number of GBM trees or GLM iterations. Can be reduced for lower accuracy and/or higher interpretability. Early-stopping usually chooses less. Ignored if fixed_max_nestimators is > 0. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_nestimators. : max nestimators config.toml: Maximum number of GBM trees or GLM iterations. Can be reduced for lower accuracy and/or higher interpretability. Early-stopping usually chooses less. Ignored if fixed_max_nestimators is > 0. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of trees/iterations: . : Set the max nestimators config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_nestimators", + "output": "max nestimators config.toml: Maximum number of GBM trees or GLM iterations. Can be reduced for lower accuracy and/or higher interpretability. 
Early-stopping usually chooses less. Ignored if fixed_max_nestimators is > 0. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_nestimators", + "output": "max nestimators config.toml: Max. number of trees/iterations: Maximum number of GBM trees or GLM iterations. Can be reduced for lower accuracy and/or higher interpretability. Early-stopping usually chooses less. Ignored if fixed_max_nestimators is > 0. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max nestimators", + "output": "max nestimators config.toml: Max. number of trees/iterations: Maximum number of GBM trees or GLM iterations. Can be reduced for lower accuracy and/or higher interpretability. Early-stopping usually chooses less. Ignored if fixed_max_nestimators is > 0. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of trees/iterations: ", + "output": "max nestimators config.toml: Max. number of trees/iterations: Maximum number of GBM trees or GLM iterations. Can be reduced for lower accuracy and/or higher interpretability. Early-stopping usually chooses less. Ignored if fixed_max_nestimators is > 0. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_nestimators", + "output": "max nestimators config.toml: Maximum number of GBM trees or GLM iterations. Can be reduced for lower accuracy and/or higher interpretability. Early-stopping usually chooses less. Ignored if fixed_max_nestimators is > 0. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_nestimators", + "output": "max nestimators config.toml: Max. number of trees/iterations: Maximum number of GBM trees or GLM iterations. 
Can be reduced for lower accuracy and/or higher interpretability. Early-stopping usually chooses less. Ignored if fixed_max_nestimators is > 0. " + }, + { + "prompt_type": "plain", + "instruction": ": What does fixed_max_nestimators do? : fixed max nestimators config.toml: Fixed maximum number of GBM trees or GLM iterations. If > 0, ignores max_nestimators and disables automatic reduction due to lower accuracy or higher interpretability. Early-stopping usually chooses less. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain fixed_max_nestimators. : fixed max nestimators config.toml: Fixed maximum number of GBM trees or GLM iterations. If > 0, ignores max_nestimators and disables automatic reduction due to lower accuracy or higher interpretability. Early-stopping usually chooses less. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Fixed max. number of trees/iterations (-1 = auto mode): . : Set the fixed max nestimators config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_max_nestimators", + "output": "fixed max nestimators config.toml: Fixed maximum number of GBM trees or GLM iterations. If > 0, ignores max_nestimators and disables automatic reduction due to lower accuracy or higher interpretability. Early-stopping usually chooses less. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_max_nestimators", + "output": "fixed max nestimators config.toml: Fixed max. number of trees/iterations (-1 = auto mode): Fixed maximum number of GBM trees or GLM iterations. If > 0, ignores max_nestimators and disables automatic reduction due to lower accuracy or higher interpretability. Early-stopping usually chooses less. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed max nestimators", + "output": "fixed max nestimators config.toml: Fixed max. number of trees/iterations (-1 = auto mode): Fixed maximum number of GBM trees or GLM iterations. If > 0, ignores max_nestimators and disables automatic reduction due to lower accuracy or higher interpretability. Early-stopping usually chooses less. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Fixed max. number of trees/iterations (-1 = auto mode): ", + "output": "fixed max nestimators config.toml: Fixed max. number of trees/iterations (-1 = auto mode): Fixed maximum number of GBM trees or GLM iterations. If > 0, ignores max_nestimators and disables automatic reduction due to lower accuracy or higher interpretability. Early-stopping usually chooses less. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fixed_max_nestimators", + "output": "fixed max nestimators config.toml: Fixed maximum number of GBM trees or GLM iterations. If > 0, ignores max_nestimators and disables automatic reduction due to lower accuracy or higher interpretability. Early-stopping usually chooses less. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fixed_max_nestimators", + "output": "fixed max nestimators config.toml: Fixed max. number of trees/iterations (-1 = auto mode): Fixed maximum number of GBM trees or GLM iterations. If > 0, ignores max_nestimators and disables automatic reduction due to lower accuracy or higher interpretability. Early-stopping usually chooses less. " + }, + { + "prompt_type": "plain", + "instruction": ": What does n_estimators_list_no_early_stopping do? 
: n estimators list no early stopping config.toml: LightGBM dart mode and normal rf mode do not use early stopping, and they will sample from these values for n_estimators. XGBoost Dart mode will also sample from these n_estimators. Also applies to XGBoost Dask models that do not yet support early stopping or callbacks. For default parameters it chooses first value in list, while mutations sample from the list. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain n_estimators_list_no_early_stopping. : n estimators list no early stopping config.toml: LightGBM dart mode and normal rf mode do not use early stopping, and they will sample from these values for n_estimators. XGBoost Dart mode will also sample from these n_estimators. Also applies to XGBoost Dask models that do not yet support early stopping or callbacks. For default parameters it chooses first value in list, while mutations sample from the list. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: n_estimators list to sample from for model mutations for models that do not use early stopping: . : Set the n estimators list no early stopping config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "n_estimators_list_no_early_stopping", + "output": "n estimators list no early stopping config.toml: LightGBM dart mode and normal rf mode do not use early stopping, and they will sample from these values for n_estimators. XGBoost Dart mode will also sample from these n_estimators. Also applies to XGBoost Dask models that do not yet support early stopping or callbacks. For default parameters it chooses first value in list, while mutations sample from the list. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "n_estimators_list_no_early_stopping", + "output": "n estimators list no early stopping config.toml: n_estimators list to sample from for model mutations for models that do not use early stopping: LightGBM dart mode and normal rf mode do not use early stopping, and they will sample from these values for n_estimators. XGBoost Dart mode will also sample from these n_estimators. Also applies to XGBoost Dask models that do not yet support early stopping or callbacks. For default parameters it chooses first value in list, while mutations sample from the list. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "n estimators list no early stopping", + "output": "n estimators list no early stopping config.toml: n_estimators list to sample from for model mutations for models that do not use early stopping: LightGBM dart mode and normal rf mode do not use early stopping, and they will sample from these values for n_estimators. XGBoost Dart mode will also sample from these n_estimators. Also applies to XGBoost Dask models that do not yet support early stopping or callbacks. For default parameters it chooses first value in list, while mutations sample from the list. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "n_estimators list to sample from for model mutations for models that do not use early stopping: ", + "output": "n estimators list no early stopping config.toml: n_estimators list to sample from for model mutations for models that do not use early stopping: LightGBM dart mode and normal rf mode do not use early stopping, and they will sample from these values for n_estimators. XGBoost Dart mode will also sample from these n_estimators. 
Also applies to XGBoost Dask models that do not yet support early stopping or callbacks. For default parameters it chooses first value in list, while mutations sample from the list. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting n_estimators_list_no_early_stopping", + "output": "n estimators list no early stopping config.toml: LightGBM dart mode and normal rf mode do not use early stopping, and they will sample from these values for n_estimators. XGBoost Dart mode will also sample from these n_estimators. Also applies to XGBoost Dask models that do not yet support early stopping or callbacks. For default parameters it chooses first value in list, while mutations sample from the list. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting n_estimators_list_no_early_stopping", + "output": "n estimators list no early stopping config.toml: n_estimators list to sample from for model mutations for models that do not use early stopping: LightGBM dart mode and normal rf mode do not use early stopping, and they will sample from these values for n_estimators. XGBoost Dart mode will also sample from these n_estimators. Also applies to XGBoost Dask models that do not yet support early stopping or callbacks. For default parameters it chooses first value in list, while mutations sample from the list. " + }, + { + "prompt_type": "plain", + "instruction": ": What does min_learning_rate_final do? : min learning rate final config.toml: Lower limit on learning rate for final ensemble GBM models. In some cases, the maximum number of trees/iterations is insufficient for the final learning rate, which can lead to no early stopping triggered and poor final model performance. Then, one can try increasing the learning rate by raising this minimum, or one can try increasing the maximum number of trees/iterations. 
" + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_learning_rate_final. : min learning rate final config.toml: Lower limit on learning rate for final ensemble GBM models.In some cases, the maximum number of trees/iterations is insufficient for the final learning rate,which can lead to no early stopping triggered and poor final model performance.Then, one can try increasing the learning rate by raising this minimum,or one can try increasing the maximum number of trees/iterations. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Minimum learning rate for final ensemble GBM models: . : Set the min learning rate final config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_learning_rate_final", + "output": "min learning rate final config.toml: Lower limit on learning rate for final ensemble GBM models.In some cases, the maximum number of trees/iterations is insufficient for the final learning rate,which can lead to no early stopping triggered and poor final model performance.Then, one can try increasing the learning rate by raising this minimum,or one can try increasing the maximum number of trees/iterations. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_learning_rate_final", + "output": "min learning rate final config.toml: Minimum learning rate for final ensemble GBM models: Lower limit on learning rate for final ensemble GBM models.In some cases, the maximum number of trees/iterations is insufficient for the final learning rate,which can lead to no early stopping triggered and poor final model performance.Then, one can try increasing the learning rate by raising this minimum,or one can try increasing the maximum number of trees/iterations. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min learning rate final", + "output": "min learning rate final config.toml: Minimum learning rate for final ensemble GBM models: Lower limit on learning rate for final ensemble GBM models.In some cases, the maximum number of trees/iterations is insufficient for the final learning rate,which can lead to no early stopping triggered and poor final model performance.Then, one can try increasing the learning rate by raising this minimum,or one can try increasing the maximum number of trees/iterations. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Minimum learning rate for final ensemble GBM models: ", + "output": "min learning rate final config.toml: Minimum learning rate for final ensemble GBM models: Lower limit on learning rate for final ensemble GBM models.In some cases, the maximum number of trees/iterations is insufficient for the final learning rate,which can lead to no early stopping triggered and poor final model performance.Then, one can try increasing the learning rate by raising this minimum,or one can try increasing the maximum number of trees/iterations. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_learning_rate_final", + "output": "min learning rate final config.toml: Lower limit on learning rate for final ensemble GBM models.In some cases, the maximum number of trees/iterations is insufficient for the final learning rate,which can lead to no early stopping triggered and poor final model performance.Then, one can try increasing the learning rate by raising this minimum,or one can try increasing the maximum number of trees/iterations. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_learning_rate_final", + "output": "min learning rate final config.toml: Minimum learning rate for final ensemble GBM models: Lower limit on learning rate for final ensemble GBM models.In some cases, the maximum number of trees/iterations is insufficient for the final learning rate,which can lead to no early stopping triggered and poor final model performance.Then, one can try increasing the learning rate by raising this minimum,or one can try increasing the maximum number of trees/iterations. " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_learning_rate_final do? : max learning rate final config.toml: Upper limit on learning rate for final ensemble GBM models" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_learning_rate_final. : max learning rate final config.toml: Upper limit on learning rate for final ensemble GBM models" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum learning rate for final ensemble GBM models: . 
: Set the max learning rate final config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_learning_rate_final", + "output": "max learning rate final config.toml: Upper limit on learning rate for final ensemble GBM models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_learning_rate_final", + "output": "max learning rate final config.toml: Maximum learning rate for final ensemble GBM models: Upper limit on learning rate for final ensemble GBM models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max learning rate final", + "output": "max learning rate final config.toml: Maximum learning rate for final ensemble GBM models: Upper limit on learning rate for final ensemble GBM models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum learning rate for final ensemble GBM models: ", + "output": "max learning rate final config.toml: Maximum learning rate for final ensemble GBM models: Upper limit on learning rate for final ensemble GBM models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_learning_rate_final", + "output": "max learning rate final config.toml: Upper limit on learning rate for final ensemble GBM models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_learning_rate_final", + "output": "max learning rate final config.toml: Maximum learning rate for final ensemble GBM models: Upper limit on learning rate for final ensemble GBM models" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_nestimators_feature_evolution_factor do? 
: max nestimators feature evolution factor config.toml: factor by which max_nestimators is reduced for tuning and feature evolution" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_nestimators_feature_evolution_factor. : max nestimators feature evolution factor config.toml: factor by which max_nestimators is reduced for tuning and feature evolution" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Reduction factor for max. number of trees/iterations during feature evolution: . : Set the max nestimators feature evolution factor config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_nestimators_feature_evolution_factor", + "output": "max nestimators feature evolution factor config.toml: factor by which max_nestimators is reduced for tuning and feature evolution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_nestimators_feature_evolution_factor", + "output": "max nestimators feature evolution factor config.toml: Reduction factor for max. number of trees/iterations during feature evolution: factor by which max_nestimators is reduced for tuning and feature evolution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max nestimators feature evolution factor", + "output": "max nestimators feature evolution factor config.toml: Reduction factor for max. number of trees/iterations during feature evolution: factor by which max_nestimators is reduced for tuning and feature evolution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Reduction factor for max. number of trees/iterations during feature evolution: ", + "output": "max nestimators feature evolution factor config.toml: Reduction factor for max. 
number of trees/iterations during feature evolution: factor by which max_nestimators is reduced for tuning and feature evolution" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_nestimators_feature_evolution_factor", + "output": "max nestimators feature evolution factor config.toml: factor by which max_nestimators is reduced for tuning and feature evolution" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_nestimators_feature_evolution_factor", + "output": "max nestimators feature evolution factor config.toml: Reduction factor for max. number of trees/iterations during feature evolution: factor by which max_nestimators is reduced for tuning and feature evolution" + }, + { + "prompt_type": "plain", + "instruction": ": What does min_learning_rate do? : min learning rate config.toml: Lower limit on learning rate for feature engineering GBM models" + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_learning_rate. : min learning rate config.toml: Lower limit on learning rate for feature engineering GBM models" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Min. learning rate for feature engineering GBM models: . : Set the min learning rate config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_learning_rate", + "output": "min learning rate config.toml: Lower limit on learning rate for feature engineering GBM models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_learning_rate", + "output": "min learning rate config.toml: Min. 
learning rate for feature engineering GBM models: Lower limit on learning rate for feature engineering GBM models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min learning rate", + "output": "min learning rate config.toml: Min. learning rate for feature engineering GBM models: Lower limit on learning rate for feature engineering GBM models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Min. learning rate for feature engineering GBM models: ", + "output": "min learning rate config.toml: Min. learning rate for feature engineering GBM models: Lower limit on learning rate for feature engineering GBM models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_learning_rate", + "output": "min learning rate config.toml: Lower limit on learning rate for feature engineering GBM models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_learning_rate", + "output": "min learning rate config.toml: Min. learning rate for feature engineering GBM models: Lower limit on learning rate for feature engineering GBM models" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_learning_rate do? : max learning rate config.toml: Upper limit on learning rate for GBM modelsIf want to override min_learning_rate and min_learning_rate_final, set this to smaller value " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_learning_rate. : max learning rate config.toml: Upper limit on learning rate for GBM modelsIf want to override min_learning_rate and min_learning_rate_final, set this to smaller value " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. learning rate for feature engineering GBM models: . 
: Set the max learning rate config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_learning_rate", + "output": "max learning rate config.toml: Upper limit on learning rate for GBM modelsIf want to override min_learning_rate and min_learning_rate_final, set this to smaller value " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_learning_rate", + "output": "max learning rate config.toml: Max. learning rate for feature engineering GBM models: Upper limit on learning rate for GBM modelsIf want to override min_learning_rate and min_learning_rate_final, set this to smaller value " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max learning rate", + "output": "max learning rate config.toml: Max. learning rate for feature engineering GBM models: Upper limit on learning rate for GBM modelsIf want to override min_learning_rate and min_learning_rate_final, set this to smaller value " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. learning rate for feature engineering GBM models: ", + "output": "max learning rate config.toml: Max. 
learning rate for feature engineering GBM models: Upper limit on learning rate for GBM modelsIf want to override min_learning_rate and min_learning_rate_final, set this to smaller value " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_learning_rate", + "output": "max learning rate config.toml: Upper limit on learning rate for GBM modelsIf want to override min_learning_rate and min_learning_rate_final, set this to smaller value " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_learning_rate", + "output": "max learning rate config.toml: Max. learning rate for feature engineering GBM models: Upper limit on learning rate for GBM modelsIf want to override min_learning_rate and min_learning_rate_final, set this to smaller value " + }, + { + "prompt_type": "plain", + "instruction": ": What does lock_ga_to_final_trees do? : lock ga to final trees config.toml: Whether to lock learning rate, tree count, early stopping rounds for GBM algorithms to the final model values." + }, + { + "prompt_type": "plain", + "instruction": ": Explain lock_ga_to_final_trees. : lock ga to final trees config.toml: Whether to lock learning rate, tree count, early stopping rounds for GBM algorithms to the final model values." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to lock tree parameters to final model values: . : Set the lock ga to final trees config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lock_ga_to_final_trees", + "output": "lock ga to final trees config.toml: Whether to lock learning rate, tree count, early stopping rounds for GBM algorithms to the final model values." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lock_ga_to_final_trees", + "output": "lock ga to final trees config.toml: Whether to lock tree parameters to final model values: Whether to lock learning rate, tree count, early stopping rounds for GBM algorithms to the final model values." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lock ga to final trees", + "output": "lock ga to final trees config.toml: Whether to lock tree parameters to final model values: Whether to lock learning rate, tree count, early stopping rounds for GBM algorithms to the final model values." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to lock tree parameters to final model values: ", + "output": "lock ga to final trees config.toml: Whether to lock tree parameters to final model values: Whether to lock learning rate, tree count, early stopping rounds for GBM algorithms to the final model values." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting lock_ga_to_final_trees", + "output": "lock ga to final trees config.toml: Whether to lock learning rate, tree count, early stopping rounds for GBM algorithms to the final model values." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting lock_ga_to_final_trees", + "output": "lock ga to final trees config.toml: Whether to lock tree parameters to final model values: Whether to lock learning rate, tree count, early stopping rounds for GBM algorithms to the final model values." + }, + { + "prompt_type": "plain", + "instruction": ": What does tune_learning_rate do? 
: tune learning rate config.toml: Whether to tune learning rate for GBM algorithms (if not doing just single final model).If tuning with Optuna, might help isolate optimal learning rate. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain tune_learning_rate. : tune learning rate config.toml: Whether to tune learning rate for GBM algorithms (if not doing just single final model).If tuning with Optuna, might help isolate optimal learning rate. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to tune learning rate even for GBM algorithms with early stopping: . : Set the tune learning rate config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tune_learning_rate", + "output": "tune learning rate config.toml: Whether to tune learning rate for GBM algorithms (if not doing just single final model).If tuning with Optuna, might help isolate optimal learning rate. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tune_learning_rate", + "output": "tune learning rate config.toml: Whether to tune learning rate even for GBM algorithms with early stopping: Whether to tune learning rate for GBM algorithms (if not doing just single final model).If tuning with Optuna, might help isolate optimal learning rate. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tune learning rate", + "output": "tune learning rate config.toml: Whether to tune learning rate even for GBM algorithms with early stopping: Whether to tune learning rate for GBM algorithms (if not doing just single final model).If tuning with Optuna, might help isolate optimal learning rate. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to tune learning rate even for GBM algorithms with early stopping: ", + "output": "tune learning rate config.toml: Whether to tune learning rate even for GBM algorithms with early stopping: Whether to tune learning rate for GBM algorithms (if not doing just single final model).If tuning with Optuna, might help isolate optimal learning rate. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tune_learning_rate", + "output": "tune learning rate config.toml: Whether to tune learning rate for GBM algorithms (if not doing just single final model).If tuning with Optuna, might help isolate optimal learning rate. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tune_learning_rate", + "output": "tune learning rate config.toml: Whether to tune learning rate even for GBM algorithms with early stopping: Whether to tune learning rate for GBM algorithms (if not doing just single final model).If tuning with Optuna, might help isolate optimal learning rate. " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_epochs do? : max epochs config.toml: Max. number of epochs for TensorFlow and FTRL models" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_epochs. : max epochs config.toml: Max. number of epochs for TensorFlow and FTRL models" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of epochs for TensorFlow / FTRL: . : Set the max epochs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_epochs", + "output": "max epochs config.toml: Max. 
number of epochs for TensorFlow and FTRL models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_epochs", + "output": "max epochs config.toml: Max. number of epochs for TensorFlow / FTRL: Max. number of epochs for TensorFlow and FTRL models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max epochs", + "output": "max epochs config.toml: Max. number of epochs for TensorFlow / FTRL: Max. number of epochs for TensorFlow and FTRL models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of epochs for TensorFlow / FTRL: ", + "output": "max epochs config.toml: Max. number of epochs for TensorFlow / FTRL: Max. number of epochs for TensorFlow and FTRL models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_epochs", + "output": "max epochs config.toml: Max. number of epochs for TensorFlow and FTRL models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_epochs", + "output": "max epochs config.toml: Max. number of epochs for TensorFlow / FTRL: Max. number of epochs for TensorFlow and FTRL models" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_epochs_tf_big_data do? : max epochs tf big data config.toml: Number of epochs for TensorFlow when larger data size." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_epochs_tf_big_data. : max epochs tf big data config.toml: Number of epochs for TensorFlow when larger data size." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_epochs_tf_big_data", + "output": "max epochs tf big data config.toml: Number of epochs for TensorFlow when larger data size." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_epochs_tf_big_data", + "output": "max epochs tf big data config.toml: Number of epochs for TensorFlow when larger data size." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max epochs tf big data", + "output": "max epochs tf big data config.toml: Number of epochs for TensorFlow when larger data size." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max epochs tf big data config.toml: Number of epochs for TensorFlow when larger data size." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_epochs_tf_big_data", + "output": "max epochs tf big data config.toml: Number of epochs for TensorFlow when larger data size." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_epochs_tf_big_data", + "output": "max epochs tf big data config.toml: Number of epochs for TensorFlow when larger data size." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_max_depth do? : max max depth config.toml: Maximum tree depth (and corresponding max max_leaves as 2**max_max_depth)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_max_depth. : max max depth config.toml: Maximum tree depth (and corresponding max max_leaves as 2**max_max_depth)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. tree depth (and Max. max_leaves as 2**max_max_depth): . 
: Set the max max depth config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_max_depth", + "output": "max max depth config.toml: Maximum tree depth (and corresponding max max_leaves as 2**max_max_depth)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_max_depth", + "output": "max max depth config.toml: Max. tree depth (and Max. max_leaves as 2**max_max_depth): Maximum tree depth (and corresponding max max_leaves as 2**max_max_depth)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max max depth", + "output": "max max depth config.toml: Max. tree depth (and Max. max_leaves as 2**max_max_depth): Maximum tree depth (and corresponding max max_leaves as 2**max_max_depth)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. tree depth (and Max. max_leaves as 2**max_max_depth): ", + "output": "max max depth config.toml: Max. tree depth (and Max. max_leaves as 2**max_max_depth): Maximum tree depth (and corresponding max max_leaves as 2**max_max_depth)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_max_depth", + "output": "max max depth config.toml: Maximum tree depth (and corresponding max max_leaves as 2**max_max_depth)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_max_depth", + "output": "max max depth config.toml: Max. tree depth (and Max. max_leaves as 2**max_max_depth): Maximum tree depth (and corresponding max max_leaves as 2**max_max_depth)" + }, + { + "prompt_type": "plain", + "instruction": ": What does default_max_bin do? 
: default max bin config.toml: Default max_bin for tree methods" + }, + { + "prompt_type": "plain", + "instruction": ": Explain default_max_bin. : default max bin config.toml: Default max_bin for tree methods" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default_max_bin", + "output": "default max bin config.toml: Default max_bin for tree methods" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default_max_bin", + "output": "default max bin config.toml: Default max_bin for tree methods" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default max bin", + "output": "default max bin config.toml: Default max_bin for tree methods" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "default max bin config.toml: Default max_bin for tree methods" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting default_max_bin", + "output": "default max bin config.toml: Default max_bin for tree methods" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting default_max_bin", + "output": "default max bin config.toml: Default max_bin for tree methods" + }, + { + "prompt_type": "plain", + "instruction": ": What does default_lightgbm_max_bin do? : default lightgbm max bin config.toml: Default max_bin for LightGBM (64 recommended for GPU LightGBM for speed)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain default_lightgbm_max_bin. 
: default lightgbm max bin config.toml: Default max_bin for LightGBM (64 recommended for GPU LightGBM for speed)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default_lightgbm_max_bin", + "output": "default lightgbm max bin config.toml: Default max_bin for LightGBM (64 recommended for GPU LightGBM for speed)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default_lightgbm_max_bin", + "output": "default lightgbm max bin config.toml: Default max_bin for LightGBM (64 recommended for GPU LightGBM for speed)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default lightgbm max bin", + "output": "default lightgbm max bin config.toml: Default max_bin for LightGBM (64 recommended for GPU LightGBM for speed)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "default lightgbm max bin config.toml: Default max_bin for LightGBM (64 recommended for GPU LightGBM for speed)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting default_lightgbm_max_bin", + "output": "default lightgbm max bin config.toml: Default max_bin for LightGBM (64 recommended for GPU LightGBM for speed)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting default_lightgbm_max_bin", + "output": "default lightgbm max bin config.toml: Default max_bin for LightGBM (64 recommended for GPU LightGBM for speed)" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_max_bin do? : max max bin config.toml: Maximum max_bin for tree features" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_max_bin. 
: max max bin config.toml: Maximum max_bin for tree features" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. max_bin for tree features: . : Set the max max bin config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_max_bin", + "output": "max max bin config.toml: Maximum max_bin for tree features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_max_bin", + "output": "max max bin config.toml: Max. max_bin for tree features: Maximum max_bin for tree features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max max bin", + "output": "max max bin config.toml: Max. max_bin for tree features: Maximum max_bin for tree features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. max_bin for tree features: ", + "output": "max max bin config.toml: Max. max_bin for tree features: Maximum max_bin for tree features" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_max_bin", + "output": "max max bin config.toml: Maximum max_bin for tree features" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_max_bin", + "output": "max max bin config.toml: Max. max_bin for tree features: Maximum max_bin for tree features" + }, + { + "prompt_type": "plain", + "instruction": ": What does min_max_bin do? : min max bin config.toml: Minimum max_bin for any tree" + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_max_bin. 
: min max bin config.toml: Minimum max_bin for any tree" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_max_bin", + "output": "min max bin config.toml: Minimum max_bin for any tree" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_max_bin", + "output": "min max bin config.toml: Minimum max_bin for any tree" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min max bin", + "output": "min max bin config.toml: Minimum max_bin for any tree" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "min max bin config.toml: Minimum max_bin for any tree" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_max_bin", + "output": "min max bin config.toml: Minimum max_bin for any tree" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_max_bin", + "output": "min max bin config.toml: Minimum max_bin for any tree" + }, + { + "prompt_type": "plain", + "instruction": ": What does scale_mem_for_max_bin do? : scale mem for max bin config.toml: Amount of memory which can handle max_bin = 256 can handle 125 columns and max_bin = 32 for 1000 columns As available memory on system goes higher than this scale, can handle proportionally more columns at higher max_bin Currently set to 10GB" + }, + { + "prompt_type": "plain", + "instruction": ": Explain scale_mem_for_max_bin. 
: scale mem for max bin config.toml: Amount of memory which can handle max_bin = 256 can handle 125 columns and max_bin = 32 for 1000 columns As available memory on system goes higher than this scale, can handle proportionally more columns at higher max_bin Currently set to 10GB" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "scale_mem_for_max_bin", + "output": "scale mem for max bin config.toml: Amount of memory which can handle max_bin = 256 can handle 125 columns and max_bin = 32 for 1000 columns As available memory on system goes higher than this scale, can handle proportionally more columns at higher max_bin Currently set to 10GB" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "scale_mem_for_max_bin", + "output": "scale mem for max bin config.toml: Amount of memory which can handle max_bin = 256 can handle 125 columns and max_bin = 32 for 1000 columns As available memory on system goes higher than this scale, can handle proportionally more columns at higher max_bin Currently set to 10GB" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "scale mem for max bin", + "output": "scale mem for max bin config.toml: Amount of memory which can handle max_bin = 256 can handle 125 columns and max_bin = 32 for 1000 columns As available memory on system goes higher than this scale, can handle proportionally more columns at higher max_bin Currently set to 10GB" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "scale mem for max bin config.toml: Amount of memory which can handle max_bin = 256 can handle 125 columns and max_bin = 32 for 1000 columns As available memory on system goes higher than this scale, can handle proportionally more columns at higher 
max_bin Currently set to 10GB" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting scale_mem_for_max_bin", + "output": "scale mem for max bin config.toml: Amount of memory which can handle max_bin = 256 can handle 125 columns and max_bin = 32 for 1000 columns As available memory on system goes higher than this scale, can handle proportionally more columns at higher max_bin Currently set to 10GB" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting scale_mem_for_max_bin", + "output": "scale mem for max bin config.toml: Amount of memory which can handle max_bin = 256 can handle 125 columns and max_bin = 32 for 1000 columns As available memory on system goes higher than this scale, can handle proportionally more columns at higher max_bin Currently set to 10GB" + }, + { + "prompt_type": "plain", + "instruction": ": What does factor_rf do? : factor rf config.toml: Factor by which rf gets more depth than gbdt" + }, + { + "prompt_type": "plain", + "instruction": ": Explain factor_rf. 
: factor rf config.toml: Factor by which rf gets more depth than gbdt" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "factor_rf", + "output": "factor rf config.toml: Factor by which rf gets more depth than gbdt" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "factor_rf", + "output": "factor rf config.toml: Factor by which rf gets more depth than gbdt" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "factor rf", + "output": "factor rf config.toml: Factor by which rf gets more depth than gbdt" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "factor rf config.toml: Factor by which rf gets more depth than gbdt" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting factor_rf", + "output": "factor rf config.toml: Factor by which rf gets more depth than gbdt" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting factor_rf", + "output": "factor rf config.toml: Factor by which rf gets more depth than gbdt" + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_use_all_cores do? : tensorflow use all cores config.toml: Whether TensorFlow will use all CPU cores, or if it will split among all transformers. Only for transformers, not TensorFlow model." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_use_all_cores. : tensorflow use all cores config.toml: Whether TensorFlow will use all CPU cores, or if it will split among all transformers. Only for transformers, not TensorFlow model." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_use_all_cores", + "output": "tensorflow use all cores config.toml: Whether TensorFlow will use all CPU cores, or if it will split among all transformers. Only for transformers, not TensorFlow model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_use_all_cores", + "output": "tensorflow use all cores config.toml: Whether TensorFlow will use all CPU cores, or if it will split among all transformers. Only for transformers, not TensorFlow model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow use all cores", + "output": "tensorflow use all cores config.toml: Whether TensorFlow will use all CPU cores, or if it will split among all transformers. Only for transformers, not TensorFlow model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tensorflow use all cores config.toml: Whether TensorFlow will use all CPU cores, or if it will split among all transformers. Only for transformers, not TensorFlow model." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_use_all_cores", + "output": "tensorflow use all cores config.toml: Whether TensorFlow will use all CPU cores, or if it will split among all transformers. Only for transformers, not TensorFlow model." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_use_all_cores", + "output": "tensorflow use all cores config.toml: Whether TensorFlow will use all CPU cores, or if it will split among all transformers. Only for transformers, not TensorFlow model." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_use_all_cores_even_if_reproducible_true do? : tensorflow use all cores even if reproducible true config.toml: Whether TensorFlow will use all CPU cores if reproducible is set, or if it will split among all transformers" + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_use_all_cores_even_if_reproducible_true. : tensorflow use all cores even if reproducible true config.toml: Whether TensorFlow will use all CPU cores if reproducible is set, or if it will split among all transformers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_use_all_cores_even_if_reproducible_true", + "output": "tensorflow use all cores even if reproducible true config.toml: Whether TensorFlow will use all CPU cores if reproducible is set, or if it will split among all transformers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_use_all_cores_even_if_reproducible_true", + "output": "tensorflow use all cores even if reproducible true config.toml: Whether TensorFlow will use all CPU cores if reproducible is set, or if it will split among all transformers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow use all cores even if reproducible true", + "output": "tensorflow use all cores even if reproducible true config.toml: Whether TensorFlow will use all CPU cores if reproducible is set, or if it will split among all transformers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tensorflow use all cores even if reproducible true config.toml: Whether TensorFlow will use all CPU cores if reproducible is set, or if it will split among all 
transformers" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_use_all_cores_even_if_reproducible_true", + "output": "tensorflow use all cores even if reproducible true config.toml: Whether TensorFlow will use all CPU cores if reproducible is set, or if it will split among all transformers" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_use_all_cores_even_if_reproducible_true", + "output": "tensorflow use all cores even if reproducible true config.toml: Whether TensorFlow will use all CPU cores if reproducible is set, or if it will split among all transformers" + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_disable_memory_optimization do? : tensorflow disable memory optimization config.toml: Whether to disable TensorFlow memory optimizations. Can help fix tensorflow.python.framework.errors_impl.AlreadyExistsError" + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_disable_memory_optimization. : tensorflow disable memory optimization config.toml: Whether to disable TensorFlow memory optimizations. Can help fix tensorflow.python.framework.errors_impl.AlreadyExistsError" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_disable_memory_optimization", + "output": "tensorflow disable memory optimization config.toml: Whether to disable TensorFlow memory optimizations. Can help fix tensorflow.python.framework.errors_impl.AlreadyExistsError" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_disable_memory_optimization", + "output": "tensorflow disable memory optimization config.toml: Whether to disable TensorFlow memory optimizations. 
Can help fix tensorflow.python.framework.errors_impl.AlreadyExistsError" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow disable memory optimization", + "output": "tensorflow disable memory optimization config.toml: Whether to disable TensorFlow memory optimizations. Can help fix tensorflow.python.framework.errors_impl.AlreadyExistsError" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tensorflow disable memory optimization config.toml: Whether to disable TensorFlow memory optimizations. Can help fix tensorflow.python.framework.errors_impl.AlreadyExistsError" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_disable_memory_optimization", + "output": "tensorflow disable memory optimization config.toml: Whether to disable TensorFlow memory optimizations. Can help fix tensorflow.python.framework.errors_impl.AlreadyExistsError" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_disable_memory_optimization", + "output": "tensorflow disable memory optimization config.toml: Whether to disable TensorFlow memory optimizations. Can help fix tensorflow.python.framework.errors_impl.AlreadyExistsError" + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_cores do? : tensorflow cores config.toml: How many cores to use for each TensorFlow model, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_cores. 
: tensorflow cores config.toml: How many cores to use for each TensorFlow model, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_cores", + "output": "tensorflow cores config.toml: How many cores to use for each TensorFlow model, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_cores", + "output": "tensorflow cores config.toml: How many cores to use for each TensorFlow model, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow cores", + "output": "tensorflow cores config.toml: How many cores to use for each TensorFlow model, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tensorflow cores config.toml: How many cores to use for each TensorFlow model, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_cores", + "output": "tensorflow cores config.toml: How many cores to use for each TensorFlow model, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_cores", + "output": "tensorflow cores config.toml: How many cores to use for each TensorFlow model, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_model_max_cores do? 
: tensorflow model max cores config.toml: For TensorFlow models, maximum number of cores to use if tensorflow_cores=0 (auto mode), because TensorFlow model is inefficient at using many cores. See also max_fit_cores for all models." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_model_max_cores. : tensorflow model max cores config.toml: For TensorFlow models, maximum number of cores to use if tensorflow_cores=0 (auto mode), because TensorFlow model is inefficient at using many cores. See also max_fit_cores for all models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_model_max_cores", + "output": "tensorflow model max cores config.toml: For TensorFlow models, maximum number of cores to use if tensorflow_cores=0 (auto mode), because TensorFlow model is inefficient at using many cores. See also max_fit_cores for all models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_model_max_cores", + "output": "tensorflow model max cores config.toml: For TensorFlow models, maximum number of cores to use if tensorflow_cores=0 (auto mode), because TensorFlow model is inefficient at using many cores. See also max_fit_cores for all models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow model max cores", + "output": "tensorflow model max cores config.toml: For TensorFlow models, maximum number of cores to use if tensorflow_cores=0 (auto mode), because TensorFlow model is inefficient at using many cores. See also max_fit_cores for all models." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tensorflow model max cores config.toml: For TensorFlow models, maximum number of cores to use if tensorflow_cores=0 (auto mode), because TensorFlow model is inefficient at using many cores. See also max_fit_cores for all models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_model_max_cores", + "output": "tensorflow model max cores config.toml: For TensorFlow models, maximum number of cores to use if tensorflow_cores=0 (auto mode), because TensorFlow model is inefficient at using many cores. See also max_fit_cores for all models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_model_max_cores", + "output": "tensorflow model max cores config.toml: For TensorFlow models, maximum number of cores to use if tensorflow_cores=0 (auto mode), because TensorFlow model is inefficient at using many cores. See also max_fit_cores for all models." + }, + { + "prompt_type": "plain", + "instruction": ": What does bert_cores do? : bert cores config.toml: How many cores to use for each Bert Model and Transformer, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain bert_cores. 
: bert cores config.toml: How many cores to use for each Bert Model and Transformer, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bert_cores", + "output": "bert cores config.toml: How many cores to use for each Bert Model and Transformer, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bert_cores", + "output": "bert cores config.toml: How many cores to use for each Bert Model and Transformer, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bert cores", + "output": "bert cores config.toml: How many cores to use for each Bert Model and Transformer, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "bert cores config.toml: How many cores to use for each Bert Model and Transformer, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting bert_cores", + "output": "bert cores config.toml: How many cores to use for each Bert Model and Transformer, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting bert_cores", + "output": "bert cores config.toml: How many cores to use for each Bert Model and Transformer, regardless if GPU or CPU based (0 = auto mode)" + }, + { + "prompt_type": "plain", + "instruction": ": What does bert_use_all_cores do? : bert use all cores config.toml: Whether Bert will use all CPU cores, or if it will split among all transformers. 
Only for transformers, not Bert model." + }, + { + "prompt_type": "plain", + "instruction": ": Explain bert_use_all_cores. : bert use all cores config.toml: Whether Bert will use all CPU cores, or if it will split among all transformers. Only for transformers, not Bert model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bert_use_all_cores", + "output": "bert use all cores config.toml: Whether Bert will use all CPU cores, or if it will split among all transformers. Only for transformers, not Bert model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bert_use_all_cores", + "output": "bert use all cores config.toml: Whether Bert will use all CPU cores, or if it will split among all transformers. Only for transformers, not Bert model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bert use all cores", + "output": "bert use all cores config.toml: Whether Bert will use all CPU cores, or if it will split among all transformers. Only for transformers, not Bert model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "bert use all cores config.toml: Whether Bert will use all CPU cores, or if it will split among all transformers. Only for transformers, not Bert model." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting bert_use_all_cores", + "output": "bert use all cores config.toml: Whether Bert will use all CPU cores, or if it will split among all transformers. Only for transformers, not Bert model." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting bert_use_all_cores", + "output": "bert use all cores config.toml: Whether Bert will use all CPU cores, or if it will split among all transformers. Only for transformers, not Bert model." + }, + { + "prompt_type": "plain", + "instruction": ": What does bert_model_max_cores do? : bert model max cores config.toml: For Bert models, maximum number of cores to use if bert_cores=0 (auto mode), because Bert model is inefficient at using many cores. See also max_fit_cores for all models." + }, + { + "prompt_type": "plain", + "instruction": ": Explain bert_model_max_cores. : bert model max cores config.toml: For Bert models, maximum number of cores to use if bert_cores=0 (auto mode), because Bert model is inefficient at using many cores. See also max_fit_cores for all models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bert_model_max_cores", + "output": "bert model max cores config.toml: For Bert models, maximum number of cores to use if bert_cores=0 (auto mode), because Bert model is inefficient at using many cores. See also max_fit_cores for all models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bert_model_max_cores", + "output": "bert model max cores config.toml: For Bert models, maximum number of cores to use if bert_cores=0 (auto mode), because Bert model is inefficient at using many cores. See also max_fit_cores for all models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bert model max cores", + "output": "bert model max cores config.toml: For Bert models, maximum number of cores to use if bert_cores=0 (auto mode), because Bert model is inefficient at using many cores. See also max_fit_cores for all models." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "bert model max cores config.toml: For Bert models, maximum number of cores to use if bert_cores=0 (auto mode), because Bert model is inefficient at using many cores. See also max_fit_cores for all models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting bert_model_max_cores", + "output": "bert model max cores config.toml: For Bert models, maximum number of cores to use if bert_cores=0 (auto mode), because Bert model is inefficient at using many cores. See also max_fit_cores for all models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting bert_model_max_cores", + "output": "bert model max cores config.toml: For Bert models, maximum number of cores to use if bert_cores=0 (auto mode), because Bert model is inefficient at using many cores. See also max_fit_cores for all models." + }, + { + "prompt_type": "plain", + "instruction": ": What does rulefit_max_num_rules do? : rulefit max num rules config.toml: Max number of rules to be used for RuleFit models (-1 for all)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain rulefit_max_num_rules. : rulefit max num rules config.toml: Max number of rules to be used for RuleFit models (-1 for all)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of rules for RuleFit (-1 for all): . 
: Set the rulefit max num rules config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rulefit_max_num_rules", + "output": "rulefit max num rules config.toml: Max number of rules to be used for RuleFit models (-1 for all)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rulefit_max_num_rules", + "output": "rulefit max num rules config.toml: Max. number of rules for RuleFit (-1 for all): Max number of rules to be used for RuleFit models (-1 for all)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rulefit max num rules", + "output": "rulefit max num rules config.toml: Max. number of rules for RuleFit (-1 for all): Max number of rules to be used for RuleFit models (-1 for all)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of rules for RuleFit (-1 for all): ", + "output": "rulefit max num rules config.toml: Max. number of rules for RuleFit (-1 for all): Max number of rules to be used for RuleFit models (-1 for all)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting rulefit_max_num_rules", + "output": "rulefit max num rules config.toml: Max number of rules to be used for RuleFit models (-1 for all)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting rulefit_max_num_rules", + "output": "rulefit max num rules config.toml: Max. number of rules for RuleFit (-1 for all): Max number of rules to be used for RuleFit models (-1 for all)" + }, + { + "prompt_type": "plain", + "instruction": ": What does rulefit_max_tree_depth do? 
: rulefit max tree depth config.toml: Max tree depth for RuleFit models" + }, + { + "prompt_type": "plain", + "instruction": ": Explain rulefit_max_tree_depth. : rulefit max tree depth config.toml: Max tree depth for RuleFit models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rulefit_max_tree_depth", + "output": "rulefit max tree depth config.toml: Max tree depth for RuleFit models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rulefit_max_tree_depth", + "output": "rulefit max tree depth config.toml: Max tree depth for RuleFit models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rulefit max tree depth", + "output": "rulefit max tree depth config.toml: Max tree depth for RuleFit models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "rulefit max tree depth config.toml: Max tree depth for RuleFit models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting rulefit_max_tree_depth", + "output": "rulefit max tree depth config.toml: Max tree depth for RuleFit models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting rulefit_max_tree_depth", + "output": "rulefit max tree depth config.toml: Max tree depth for RuleFit models" + }, + { + "prompt_type": "plain", + "instruction": ": What does rulefit_max_num_trees do? : rulefit max num trees config.toml: Max number of trees for RuleFit models" + }, + { + "prompt_type": "plain", + "instruction": ": Explain rulefit_max_num_trees. 
: rulefit max num trees config.toml: Max number of trees for RuleFit models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rulefit_max_num_trees", + "output": "rulefit max num trees config.toml: Max number of trees for RuleFit models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rulefit_max_num_trees", + "output": "rulefit max num trees config.toml: Max number of trees for RuleFit models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rulefit max num trees", + "output": "rulefit max num trees config.toml: Max number of trees for RuleFit models" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "rulefit max num trees config.toml: Max number of trees for RuleFit models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting rulefit_max_num_trees", + "output": "rulefit max num trees config.toml: Max number of trees for RuleFit models" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting rulefit_max_num_trees", + "output": "rulefit max num trees config.toml: Max number of trees for RuleFit models" + }, + { + "prompt_type": "plain", + "instruction": ": What does one_hot_encoding_cardinality_threshold do? : one hot encoding cardinality threshold config.toml: Enable One-Hot-Encoding (which does binning to limit to number of bins to no more than 100 anyway) for categorical columns with fewer than this many unique values Set to 0 to disable" + }, + { + "prompt_type": "plain", + "instruction": ": Explain one_hot_encoding_cardinality_threshold. 
: one hot encoding cardinality threshold config.toml: Enable One-Hot-Encoding (which does binning to limit to number of bins to no more than 100 anyway) for categorical columns with fewer than this many unique values Set to 0 to disable" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "one_hot_encoding_cardinality_threshold", + "output": "one hot encoding cardinality threshold config.toml: Enable One-Hot-Encoding (which does binning to limit to number of bins to no more than 100 anyway) for categorical columns with fewer than this many unique values Set to 0 to disable" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "one_hot_encoding_cardinality_threshold", + "output": "one hot encoding cardinality threshold config.toml: Enable One-Hot-Encoding (which does binning to limit to number of bins to no more than 100 anyway) for categorical columns with fewer than this many unique values Set to 0 to disable" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "one hot encoding cardinality threshold", + "output": "one hot encoding cardinality threshold config.toml: Enable One-Hot-Encoding (which does binning to limit to number of bins to no more than 100 anyway) for categorical columns with fewer than this many unique values Set to 0 to disable" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "one hot encoding cardinality threshold config.toml: Enable One-Hot-Encoding (which does binning to limit to number of bins to no more than 100 anyway) for categorical columns with fewer than this many unique values Set to 0 to disable" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting 
one_hot_encoding_cardinality_threshold", + "output": "one hot encoding cardinality threshold config.toml: Enable One-Hot-Encoding (which does binning to limit to number of bins to no more than 100 anyway) for categorical columns with fewer than this many unique values Set to 0 to disable" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting one_hot_encoding_cardinality_threshold", + "output": "one hot encoding cardinality threshold config.toml: Enable One-Hot-Encoding (which does binning to limit to number of bins to no more than 100 anyway) for categorical columns with fewer than this many unique values Set to 0 to disable" + }, + { + "prompt_type": "plain", + "instruction": ": What does one_hot_encoding_cardinality_threshold_default_use do? : one hot encoding cardinality threshold default use config.toml: How many levels to choose one-hot by default instead of other encodings, restricted down to 10x less (down to 2 levels) when number of columns able to be used with OHE exceeds 500. Note the total number of bins is reduced if bigger data independently of this." + }, + { + "prompt_type": "plain", + "instruction": ": Explain one_hot_encoding_cardinality_threshold_default_use. : one hot encoding cardinality threshold default use config.toml: How many levels to choose one-hot by default instead of other encodings, restricted down to 10x less (down to 2 levels) when number of columns able to be used with OHE exceeds 500. Note the total number of bins is reduced if bigger data independently of this." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "one_hot_encoding_cardinality_threshold_default_use", + "output": "one hot encoding cardinality threshold default use config.toml: How many levels to choose one-hot by default instead of other encodings, restricted down to 10x less (down to 2 levels) when number of columns able to be used with OHE exceeds 500. Note the total number of bins is reduced if bigger data independently of this." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "one_hot_encoding_cardinality_threshold_default_use", + "output": "one hot encoding cardinality threshold default use config.toml: How many levels to choose one-hot by default instead of other encodings, restricted down to 10x less (down to 2 levels) when number of columns able to be used with OHE exceeds 500. Note the total number of bins is reduced if bigger data independently of this." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "one hot encoding cardinality threshold default use", + "output": "one hot encoding cardinality threshold default use config.toml: How many levels to choose one-hot by default instead of other encodings, restricted down to 10x less (down to 2 levels) when number of columns able to be used with OHE exceeds 500. Note the total number of bins is reduced if bigger data independently of this." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "one hot encoding cardinality threshold default use config.toml: How many levels to choose one-hot by default instead of other encodings, restricted down to 10x less (down to 2 levels) when number of columns able to be used with OHE exceeds 500. 
Note the total number of bins is reduced if bigger data independently of this." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting one_hot_encoding_cardinality_threshold_default_use", + "output": "one hot encoding cardinality threshold default use config.toml: How many levels to choose one-hot by default instead of other encodings, restricted down to 10x less (down to 2 levels) when number of columns able to be used with OHE exceeds 500. Note the total number of bins is reduced if bigger data independently of this." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting one_hot_encoding_cardinality_threshold_default_use", + "output": "one hot encoding cardinality threshold default use config.toml: How many levels to choose one-hot by default instead of other encodings, restricted down to 10x less (down to 2 levels) when number of columns able to be used with OHE exceeds 500. Note the total number of bins is reduced if bigger data independently of this." + }, + { + "prompt_type": "plain", + "instruction": ": What does text_as_categorical_cardinality_threshold do? : text as categorical cardinality threshold config.toml: Treat text columns also as categorical columns if the cardinality is <= this value. Set to 0 to treat text columns only as text." + }, + { + "prompt_type": "plain", + "instruction": ": Explain text_as_categorical_cardinality_threshold. : text as categorical cardinality threshold config.toml: Treat text columns also as categorical columns if the cardinality is <= this value. Set to 0 to treat text columns only as text." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text_as_categorical_cardinality_threshold", + "output": "text as categorical cardinality threshold config.toml: Treat text columns also as categorical columns if the cardinality is <= this value. 
Set to 0 to treat text columns only as text." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text_as_categorical_cardinality_threshold", + "output": "text as categorical cardinality threshold config.toml: Treat text columns also as categorical columns if the cardinality is <= this value. Set to 0 to treat text columns only as text." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text as categorical cardinality threshold", + "output": "text as categorical cardinality threshold config.toml: Treat text columns also as categorical columns if the cardinality is <= this value. Set to 0 to treat text columns only as text." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "text as categorical cardinality threshold config.toml: Treat text columns also as categorical columns if the cardinality is <= this value. Set to 0 to treat text columns only as text." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting text_as_categorical_cardinality_threshold", + "output": "text as categorical cardinality threshold config.toml: Treat text columns also as categorical columns if the cardinality is <= this value. Set to 0 to treat text columns only as text." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting text_as_categorical_cardinality_threshold", + "output": "text as categorical cardinality threshold config.toml: Treat text columns also as categorical columns if the cardinality is <= this value. Set to 0 to treat text columns only as text." + }, + { + "prompt_type": "plain", + "instruction": ": What does numeric_as_categorical_cardinality_threshold do? 
: numeric as categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical if num_as_cat is True." + }, + { + "prompt_type": "plain", + "instruction": ": Explain numeric_as_categorical_cardinality_threshold. : numeric as categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical if num_as_cat is True." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "numeric_as_categorical_cardinality_threshold", + "output": "numeric as categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical if num_as_cat is True." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "numeric_as_categorical_cardinality_threshold", + "output": "numeric as categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical if num_as_cat is True." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "numeric as categorical cardinality threshold", + "output": "numeric as categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical if num_as_cat is True." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "numeric as categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical if num_as_cat is True." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting numeric_as_categorical_cardinality_threshold", + "output": "numeric as categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical if num_as_cat is True." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting numeric_as_categorical_cardinality_threshold", + "output": "numeric as categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical if num_as_cat is True." + }, + { + "prompt_type": "plain", + "instruction": ": What does numeric_as_ohe_categorical_cardinality_threshold do? : numeric as ohe categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns to possibly one-hot encode if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical to possibly ohe-hot encode if num_as_cat is True." + }, + { + "prompt_type": "plain", + "instruction": ": Explain numeric_as_ohe_categorical_cardinality_threshold. 
: numeric as ohe categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns to possibly one-hot encode if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical to possibly ohe-hot encode if num_as_cat is True." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "numeric_as_ohe_categorical_cardinality_threshold", + "output": "numeric as ohe categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns to possibly one-hot encode if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical to possibly ohe-hot encode if num_as_cat is True." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "numeric_as_ohe_categorical_cardinality_threshold", + "output": "numeric as ohe categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns to possibly one-hot encode if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical to possibly ohe-hot encode if num_as_cat is True." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "numeric as ohe categorical cardinality threshold", + "output": "numeric as ohe categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns to possibly one-hot encode if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical to possibly ohe-hot encode if num_as_cat is True." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "numeric as ohe categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns to possibly one-hot encode if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical to possibly ohe-hot encode if num_as_cat is True." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting numeric_as_ohe_categorical_cardinality_threshold", + "output": "numeric as ohe categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns to possibly one-hot encode if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical to possibly ohe-hot encode if num_as_cat is True." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting numeric_as_ohe_categorical_cardinality_threshold", + "output": "numeric as ohe categorical cardinality threshold config.toml: If num_as_cat is true, then treat numeric columns also as categorical columns to possibly one-hot encode if the cardinality is > this value. Setting to 0 allows all numeric to be treated as categorical to possibly ohe-hot encode if num_as_cat is True." + }, + { + "prompt_type": "plain", + "instruction": ": What does one_hot_encoding_show_actual_levels_in_features do? : one hot encoding show actual levels in features config.toml: Whether to show real levels in One Hot Encoding feature names. Leads to feature aggregation problems when switch between binning and not binning in fold splits. Feature description will still contain levels in each bin if True or False.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain one_hot_encoding_show_actual_levels_in_features. 
: one hot encoding show actual levels in features config.toml: Whether to show real levels in One Hot Encoding feature names. Leads to feature aggregation problems when switch between binning and not binning in fold splits. Feature description will still contain levels in each bin if True or False.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "one_hot_encoding_show_actual_levels_in_features", + "output": "one hot encoding show actual levels in features config.toml: Whether to show real levels in One Hot Encoding feature names. Leads to feature aggregation problems when switch between binning and not binning in fold splits. Feature description will still contain levels in each bin if True or False.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "one_hot_encoding_show_actual_levels_in_features", + "output": "one hot encoding show actual levels in features config.toml: Whether to show real levels in One Hot Encoding feature names. Leads to feature aggregation problems when switch between binning and not binning in fold splits. Feature description will still contain levels in each bin if True or False.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "one hot encoding show actual levels in features", + "output": "one hot encoding show actual levels in features config.toml: Whether to show real levels in One Hot Encoding feature names. Leads to feature aggregation problems when switch between binning and not binning in fold splits. Feature description will still contain levels in each bin if True or False.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to show real levels in One Hot Encoding feature names. 
Leads to feature aggregation problems when switch between binning and not binning in fold splits. Feature description will still contain levels in each bin if True or False.: ", + "output": "one hot encoding show actual levels in features config.toml: Whether to show real levels in One Hot Encoding feature names. Leads to feature aggregation problems when switch between binning and not binning in fold splits. Feature description will still contain levels in each bin if True or False.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting one_hot_encoding_show_actual_levels_in_features", + "output": "one hot encoding show actual levels in features config.toml: Whether to show real levels in One Hot Encoding feature names. Leads to feature aggregation problems when switch between binning and not binning in fold splits. Feature description will still contain levels in each bin if True or False.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting one_hot_encoding_show_actual_levels_in_features", + "output": "one hot encoding show actual levels in features config.toml: Whether to show real levels in One Hot Encoding feature names. Leads to feature aggregation problems when switch between binning and not binning in fold splits. Feature description will still contain levels in each bin if True or False.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does fixed_ensemble_level do? : fixed ensemble level config.toml: Fixed ensemble_level-1 = auto, based upon ensemble_accuracy_switch, accuracy, size of data, etc.0 = No ensemble, only final single model on validated iteration/tree count1 = 1 model, multiple ensemble folds (cross-validation)>=2 = >=2 models, multiple ensemble folds (cross-validation) " + }, + { + "prompt_type": "plain", + "instruction": ": Explain fixed_ensemble_level. 
: fixed ensemble level config.toml: Fixed ensemble_level-1 = auto, based upon ensemble_accuracy_switch, accuracy, size of data, etc.0 = No ensemble, only final single model on validated iteration/tree count1 = 1 model, multiple ensemble folds (cross-validation)>=2 = >=2 models, multiple ensemble folds (cross-validation) " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Ensemble level for final modeling pipeline: . : Set the fixed ensemble level config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_ensemble_level", + "output": "fixed ensemble level config.toml: Fixed ensemble_level-1 = auto, based upon ensemble_accuracy_switch, accuracy, size of data, etc.0 = No ensemble, only final single model on validated iteration/tree count1 = 1 model, multiple ensemble folds (cross-validation)>=2 = >=2 models, multiple ensemble folds (cross-validation) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_ensemble_level", + "output": "fixed ensemble level config.toml: Ensemble level for final modeling pipeline: Fixed ensemble_level-1 = auto, based upon ensemble_accuracy_switch, accuracy, size of data, etc.0 = No ensemble, only final single model on validated iteration/tree count1 = 1 model, multiple ensemble folds (cross-validation)>=2 = >=2 models, multiple ensemble folds (cross-validation) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed ensemble level", + "output": "fixed ensemble level config.toml: Ensemble level for final modeling pipeline: Fixed ensemble_level-1 = auto, based upon ensemble_accuracy_switch, accuracy, size of data, etc.0 = No ensemble, only final single model on validated iteration/tree count1 = 1 model, multiple ensemble folds (cross-validation)>=2 = >=2 models, multiple 
ensemble folds (cross-validation) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Ensemble level for final modeling pipeline: ", + "output": "fixed ensemble level config.toml: Ensemble level for final modeling pipeline: Fixed ensemble_level-1 = auto, based upon ensemble_accuracy_switch, accuracy, size of data, etc.0 = No ensemble, only final single model on validated iteration/tree count1 = 1 model, multiple ensemble folds (cross-validation)>=2 = >=2 models, multiple ensemble folds (cross-validation) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fixed_ensemble_level", + "output": "fixed ensemble level config.toml: Fixed ensemble_level-1 = auto, based upon ensemble_accuracy_switch, accuracy, size of data, etc.0 = No ensemble, only final single model on validated iteration/tree count1 = 1 model, multiple ensemble folds (cross-validation)>=2 = >=2 models, multiple ensemble folds (cross-validation) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fixed_ensemble_level", + "output": "fixed ensemble level config.toml: Ensemble level for final modeling pipeline: Fixed ensemble_level-1 = auto, based upon ensemble_accuracy_switch, accuracy, size of data, etc.0 = No ensemble, only final single model on validated iteration/tree count1 = 1 model, multiple ensemble folds (cross-validation)>=2 = >=2 models, multiple ensemble folds (cross-validation) " + }, + { + "prompt_type": "plain", + "instruction": ": What does cross_validate_single_final_model do? : cross validate single final model config.toml: If enabled, use cross-validation to determine optimal parameters for single final model, and to be able to create training holdout predictions." + }, + { + "prompt_type": "plain", + "instruction": ": Explain cross_validate_single_final_model. 
: cross validate single final model config.toml: If enabled, use cross-validation to determine optimal parameters for single final model, and to be able to create training holdout predictions." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Cross-validate single final model: . : Set the cross validate single final model config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cross_validate_single_final_model", + "output": "cross validate single final model config.toml: If enabled, use cross-validation to determine optimal parameters for single final model, and to be able to create training holdout predictions." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cross_validate_single_final_model", + "output": "cross validate single final model config.toml: Cross-validate single final model: If enabled, use cross-validation to determine optimal parameters for single final model, and to be able to create training holdout predictions." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cross validate single final model", + "output": "cross validate single final model config.toml: Cross-validate single final model: If enabled, use cross-validation to determine optimal parameters for single final model, and to be able to create training holdout predictions." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Cross-validate single final model: ", + "output": "cross validate single final model config.toml: Cross-validate single final model: If enabled, use cross-validation to determine optimal parameters for single final model, and to be able to create training holdout predictions." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting cross_validate_single_final_model", + "output": "cross validate single final model config.toml: If enabled, use cross-validation to determine optimal parameters for single final model, and to be able to create training holdout predictions." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting cross_validate_single_final_model", + "output": "cross validate single final model config.toml: Cross-validate single final model: If enabled, use cross-validation to determine optimal parameters for single final model, and to be able to create training holdout predictions." + }, + { + "prompt_type": "plain", + "instruction": ": What does ensemble_meta_learner do? : ensemble meta learner config.toml: Model to combine base model predictions, for experiments that create a final pipelineconsisting of multiple base models. blender: Creates a linear blend with non-negative weights that add to 1 (blending) - recommended extra_trees: Creates a tree model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. neural_net: Creates a neural net model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain ensemble_meta_learner. : ensemble meta learner config.toml: Model to combine base model predictions, for experiments that create a final pipelineconsisting of multiple base models. blender: Creates a linear blend with non-negative weights that add to 1 (blending) - recommended extra_trees: Creates a tree model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. 
neural_net: Creates a neural net model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Type of ensemble meta learner. Blender is recommended for most use cases.: . : Set the ensemble meta learner config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ensemble_meta_learner", + "output": "ensemble meta learner config.toml: Model to combine base model predictions, for experiments that create a final pipelineconsisting of multiple base models. blender: Creates a linear blend with non-negative weights that add to 1 (blending) - recommended extra_trees: Creates a tree model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. neural_net: Creates a neural net model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ensemble_meta_learner", + "output": "ensemble meta learner config.toml: Type of ensemble meta learner. Blender is recommended for most use cases.: Model to combine base model predictions, for experiments that create a final pipelineconsisting of multiple base models. blender: Creates a linear blend with non-negative weights that add to 1 (blending) - recommended extra_trees: Creates a tree model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. neural_net: Creates a neural net model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ensemble meta learner", + "output": "ensemble meta learner config.toml: Type of ensemble meta learner. Blender is recommended for most use cases.: Model to combine base model predictions, for experiments that create a final pipelineconsisting of multiple base models. blender: Creates a linear blend with non-negative weights that add to 1 (blending) - recommended extra_trees: Creates a tree model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. neural_net: Creates a neural net model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Type of ensemble meta learner. Blender is recommended for most use cases.: ", + "output": "ensemble meta learner config.toml: Type of ensemble meta learner. Blender is recommended for most use cases.: Model to combine base model predictions, for experiments that create a final pipelineconsisting of multiple base models. blender: Creates a linear blend with non-negative weights that add to 1 (blending) - recommended extra_trees: Creates a tree model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. neural_net: Creates a neural net model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ensemble_meta_learner", + "output": "ensemble meta learner config.toml: Model to combine base model predictions, for experiments that create a final pipelineconsisting of multiple base models. blender: Creates a linear blend with non-negative weights that add to 1 (blending) - recommended extra_trees: Creates a tree model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. neural_net: Creates a neural net model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ensemble_meta_learner", + "output": "ensemble meta learner config.toml: Type of ensemble meta learner. Blender is recommended for most use cases.: Model to combine base model predictions, for experiments that create a final pipelineconsisting of multiple base models. blender: Creates a linear blend with non-negative weights that add to 1 (blending) - recommended extra_trees: Creates a tree model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. neural_net: Creates a neural net model to non-linearly combine the base models (stacking) - experimental, and recommended to also set enable cross_validate_meta_learner. " + }, + { + "prompt_type": "plain", + "instruction": ": What does cross_validate_meta_learner do? : cross validate meta learner config.toml: If enabled, use cross-validation to create an ensemble for the meta learner itself. Especially recommended for``ensemble_meta_learner='extra_trees'``, to make unbiased training holdout predictions.Will disable MOJO if enabled. 
Not needed for ``ensemble_meta_learner='blender'``.\" " + }, + { + "prompt_type": "plain", + "instruction": ": Explain cross_validate_meta_learner. : cross validate meta learner config.toml: If enabled, use cross-validation to create an ensemble for the meta learner itself. Especially recommended for``ensemble_meta_learner='extra_trees'``, to make unbiased training holdout predictions.Will disable MOJO if enabled. Not needed for ``ensemble_meta_learner='blender'``.\" " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Cross-validate meta learner for final ensemble.: . : Set the cross validate meta learner config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cross_validate_meta_learner", + "output": "cross validate meta learner config.toml: If enabled, use cross-validation to create an ensemble for the meta learner itself. Especially recommended for``ensemble_meta_learner='extra_trees'``, to make unbiased training holdout predictions.Will disable MOJO if enabled. Not needed for ``ensemble_meta_learner='blender'``.\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cross_validate_meta_learner", + "output": "cross validate meta learner config.toml: Cross-validate meta learner for final ensemble.: If enabled, use cross-validation to create an ensemble for the meta learner itself. Especially recommended for``ensemble_meta_learner='extra_trees'``, to make unbiased training holdout predictions.Will disable MOJO if enabled. 
Not needed for ``ensemble_meta_learner='blender'``.\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cross validate meta learner", + "output": "cross validate meta learner config.toml: Cross-validate meta learner for final ensemble.: If enabled, use cross-validation to create an ensemble for the meta learner itself. Especially recommended for``ensemble_meta_learner='extra_trees'``, to make unbiased training holdout predictions.Will disable MOJO if enabled. Not needed for ``ensemble_meta_learner='blender'``.\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Cross-validate meta learner for final ensemble.: ", + "output": "cross validate meta learner config.toml: Cross-validate meta learner for final ensemble.: If enabled, use cross-validation to create an ensemble for the meta learner itself. Especially recommended for``ensemble_meta_learner='extra_trees'``, to make unbiased training holdout predictions.Will disable MOJO if enabled. Not needed for ``ensemble_meta_learner='blender'``.\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting cross_validate_meta_learner", + "output": "cross validate meta learner config.toml: If enabled, use cross-validation to create an ensemble for the meta learner itself. Especially recommended for``ensemble_meta_learner='extra_trees'``, to make unbiased training holdout predictions.Will disable MOJO if enabled. Not needed for ``ensemble_meta_learner='blender'``.\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting cross_validate_meta_learner", + "output": "cross validate meta learner config.toml: Cross-validate meta learner for final ensemble.: If enabled, use cross-validation to create an ensemble for the meta learner itself. 
Especially recommended for``ensemble_meta_learner='extra_trees'``, to make unbiased training holdout predictions.Will disable MOJO if enabled. Not needed for ``ensemble_meta_learner='blender'``.\" " + }, + { + "prompt_type": "plain", + "instruction": ": What does parameter_tuning_num_models do? : parameter tuning num models config.toml: Number of models to tune during pre-evolution phase Can make this lower to avoid excessive tuning, or make higher to do enhanced tuning. ``-1 : auto`` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain parameter_tuning_num_models. : parameter tuning num models config.toml: Number of models to tune during pre-evolution phase Can make this lower to avoid excessive tuning, or make higher to do enhanced tuning. ``-1 : auto`` " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of models during tuning phase (-1 = auto): . : Set the parameter tuning num models config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "parameter_tuning_num_models", + "output": "parameter tuning num models config.toml: Number of models to tune during pre-evolution phase Can make this lower to avoid excessive tuning, or make higher to do enhanced tuning. ``-1 : auto`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "parameter_tuning_num_models", + "output": "parameter tuning num models config.toml: Number of models during tuning phase (-1 = auto): Number of models to tune during pre-evolution phase Can make this lower to avoid excessive tuning, or make higher to do enhanced tuning. 
``-1 : auto`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "parameter tuning num models", + "output": "parameter tuning num models config.toml: Number of models during tuning phase (-1 = auto): Number of models to tune during pre-evolution phase Can make this lower to avoid excessive tuning, or make higher to do enhanced tuning. ``-1 : auto`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of models during tuning phase (-1 = auto): ", + "output": "parameter tuning num models config.toml: Number of models during tuning phase (-1 = auto): Number of models to tune during pre-evolution phase Can make this lower to avoid excessive tuning, or make higher to do enhanced tuning. ``-1 : auto`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting parameter_tuning_num_models", + "output": "parameter tuning num models config.toml: Number of models to tune during pre-evolution phase Can make this lower to avoid excessive tuning, or make higher to do enhanced tuning. ``-1 : auto`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting parameter_tuning_num_models", + "output": "parameter tuning num models config.toml: Number of models during tuning phase (-1 = auto): Number of models to tune during pre-evolution phase Can make this lower to avoid excessive tuning, or make higher to do enhanced tuning. ``-1 : auto`` " + }, + { + "prompt_type": "plain", + "instruction": ": What does parameter_tuning_num_models_sequence do? : parameter tuning num models sequence config.toml: Number of models (out of all parameter_tuning_num_models) to have as SEQUENCE instead of random features/parameters. 
``-1 : auto, use at least one default individual per model class tuned`` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain parameter_tuning_num_models_sequence. : parameter tuning num models sequence config.toml: Number of models (out of all parameter_tuning_num_models) to have as SEQUENCE instead of random features/parameters. ``-1 : auto, use at least one default individual per model class tuned`` " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of default simple models during tuning phase (-1 = auto): . : Set the parameter tuning num models sequence config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "parameter_tuning_num_models_sequence", + "output": "parameter tuning num models sequence config.toml: Number of models (out of all parameter_tuning_num_models) to have as SEQUENCE instead of random features/parameters. ``-1 : auto, use at least one default individual per model class tuned`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "parameter_tuning_num_models_sequence", + "output": "parameter tuning num models sequence config.toml: Number of default simple models during tuning phase (-1 = auto): Number of models (out of all parameter_tuning_num_models) to have as SEQUENCE instead of random features/parameters. ``-1 : auto, use at least one default individual per model class tuned`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "parameter tuning num models sequence", + "output": "parameter tuning num models sequence config.toml: Number of default simple models during tuning phase (-1 = auto): Number of models (out of all parameter_tuning_num_models) to have as SEQUENCE instead of random features/parameters. 
``-1 : auto, use at least one default individual per model class tuned`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of default simple models during tuning phase (-1 = auto): ", + "output": "parameter tuning num models sequence config.toml: Number of default simple models during tuning phase (-1 = auto): Number of models (out of all parameter_tuning_num_models) to have as SEQUENCE instead of random features/parameters. ``-1 : auto, use at least one default individual per model class tuned`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting parameter_tuning_num_models_sequence", + "output": "parameter tuning num models sequence config.toml: Number of models (out of all parameter_tuning_num_models) to have as SEQUENCE instead of random features/parameters. ``-1 : auto, use at least one default individual per model class tuned`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting parameter_tuning_num_models_sequence", + "output": "parameter tuning num models sequence config.toml: Number of default simple models during tuning phase (-1 = auto): Number of models (out of all parameter_tuning_num_models) to have as SEQUENCE instead of random features/parameters. ``-1 : auto, use at least one default individual per model class tuned`` " + }, + { + "prompt_type": "plain", + "instruction": ": What does parameter_tuning_num_models_extra do? : parameter tuning num models extra config.toml: Number of models to add during tuning that cover other cases, like for TS having no TE on time column groups. ``-1 : auto, adds additional models to protect against overfit on high-gain training features.`` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain parameter_tuning_num_models_extra. 
: parameter tuning num models extra config.toml: Number of models to add during tuning that cover other cases, like for TS having no TE on time column groups. ``-1 : auto, adds additional models to protect against overfit on high-gain training features.`` " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of extra models during tuning phase (-1 = auto): . : Set the parameter tuning num models extra config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "parameter_tuning_num_models_extra", + "output": "parameter tuning num models extra config.toml: Number of models to add during tuning that cover other cases, like for TS having no TE on time column groups. ``-1 : auto, adds additional models to protect against overfit on high-gain training features.`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "parameter_tuning_num_models_extra", + "output": "parameter tuning num models extra config.toml: Number of extra models during tuning phase (-1 = auto): Number of models to add during tuning that cover other cases, like for TS having no TE on time column groups. ``-1 : auto, adds additional models to protect against overfit on high-gain training features.`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "parameter tuning num models extra", + "output": "parameter tuning num models extra config.toml: Number of extra models during tuning phase (-1 = auto): Number of models to add during tuning that cover other cases, like for TS having no TE on time column groups. 
``-1 : auto, adds additional models to protect against overfit on high-gain training features.`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of extra models during tuning phase (-1 = auto): ", + "output": "parameter tuning num models extra config.toml: Number of extra models during tuning phase (-1 = auto): Number of models to add during tuning that cover other cases, like for TS having no TE on time column groups. ``-1 : auto, adds additional models to protect against overfit on high-gain training features.`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting parameter_tuning_num_models_extra", + "output": "parameter tuning num models extra config.toml: Number of models to add during tuning that cover other cases, like for TS having no TE on time column groups. ``-1 : auto, adds additional models to protect against overfit on high-gain training features.`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting parameter_tuning_num_models_extra", + "output": "parameter tuning num models extra config.toml: Number of extra models during tuning phase (-1 = auto): Number of models to add during tuning that cover other cases, like for TS having no TE on time column groups. ``-1 : auto, adds additional models to protect against overfit on high-gain training features.`` " + }, + { + "prompt_type": "plain", + "instruction": ": What does num_tuning_instances do? : num tuning instances config.toml: Dictionary of model class name (keys) and number (values) of instances." + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_tuning_instances. : num tuning instances config.toml: Dictionary of model class name (keys) and number (values) of instances." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Num. in tuning: . 
: Set the num tuning instances config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_tuning_instances", + "output": "num tuning instances config.toml: Dictionary of model class name (keys) and number (values) of instances." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_tuning_instances", + "output": "num tuning instances config.toml: Num. in tuning: Dictionary of model class name (keys) and number (values) of instances." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num tuning instances", + "output": "num tuning instances config.toml: Num. in tuning: Dictionary of model class name (keys) and number (values) of instances." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Num. in tuning: ", + "output": "num tuning instances config.toml: Num. in tuning: Dictionary of model class name (keys) and number (values) of instances." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_tuning_instances", + "output": "num tuning instances config.toml: Dictionary of model class name (keys) and number (values) of instances." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_tuning_instances", + "output": "num tuning instances config.toml: Num. in tuning: Dictionary of model class name (keys) and number (values) of instances." + }, + { + "prompt_type": "plain", + "instruction": ": What does validate_meta_learner do? : validate meta learner config.toml: Enable basic logging and notifications for ensemble meta learner: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain validate_meta_learner. 
: validate meta learner config.toml: Enable basic logging and notifications for ensemble meta learner: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "validate_meta_learner", + "output": "validate meta learner config.toml: Enable basic logging and notifications for ensemble meta learner: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "validate_meta_learner", + "output": "validate meta learner config.toml: Enable basic logging and notifications for ensemble meta learner: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "validate meta learner", + "output": "validate meta learner config.toml: Enable basic logging and notifications for ensemble meta learner: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable basic logging and notifications for ensemble meta learner: ", + "output": "validate meta learner config.toml: Enable basic logging and notifications for ensemble meta learner: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting validate_meta_learner", + "output": "validate meta learner config.toml: Enable basic logging and notifications for ensemble meta learner: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting validate_meta_learner", + "output": "validate meta learner config.toml: Enable basic logging and notifications for ensemble meta learner: " + }, + { + "prompt_type": "plain", + "instruction": ": What does validate_meta_learner_extra do? 
: validate meta learner extra config.toml: Enable extra logging for ensemble meta learner: ensemble must be at least as good as each base model: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain validate_meta_learner_extra. : validate meta learner extra config.toml: Enable extra logging for ensemble meta learner: ensemble must be at least as good as each base model: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "validate_meta_learner_extra", + "output": "validate meta learner extra config.toml: Enable extra logging for ensemble meta learner: ensemble must be at least as good as each base model: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "validate_meta_learner_extra", + "output": "validate meta learner extra config.toml: Enable extra logging for ensemble meta learner: ensemble must be at least as good as each base model: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "validate meta learner extra", + "output": "validate meta learner extra config.toml: Enable extra logging for ensemble meta learner: ensemble must be at least as good as each base model: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable extra logging for ensemble meta learner: ensemble must be at least as good as each base model: ", + "output": "validate meta learner extra config.toml: Enable extra logging for ensemble meta learner: ensemble must be at least as good as each base model: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting validate_meta_learner_extra", + "output": "validate meta learner extra config.toml: Enable extra logging for ensemble meta learner: ensemble must be at least as good as each 
base model: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting validate_meta_learner_extra", + "output": "validate meta learner extra config.toml: Enable extra logging for ensemble meta learner: ensemble must be at least as good as each base model: " + }, + { + "prompt_type": "plain", + "instruction": ": What does fixed_num_folds_evolution do? : fixed num folds evolution config.toml: Specify the fixed number of cross-validation folds (if >= 2) for feature evolution. (The actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fixed_num_folds_evolution. : fixed num folds evolution config.toml: Specify the fixed number of cross-validation folds (if >= 2) for feature evolution. (The actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of cross-validation folds for feature evolution (-1 = auto): . : Set the fixed num folds evolution config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_num_folds_evolution", + "output": "fixed num folds evolution config.toml: Specify the fixed number of cross-validation folds (if >= 2) for feature evolution. (The actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_num_folds_evolution", + "output": "fixed num folds evolution config.toml: Number of cross-validation folds for feature evolution (-1 = auto): Specify the fixed number of cross-validation folds (if >= 2) for feature evolution. (The actual number of splits allowed can be less and is determined at experiment run-time)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed num folds evolution", + "output": "fixed num folds evolution config.toml: Number of cross-validation folds for feature evolution (-1 = auto): Specify the fixed number of cross-validation folds (if >= 2) for feature evolution. (The actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of cross-validation folds for feature evolution (-1 = auto): ", + "output": "fixed num folds evolution config.toml: Number of cross-validation folds for feature evolution (-1 = auto): Specify the fixed number of cross-validation folds (if >= 2) for feature evolution. (The actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fixed_num_folds_evolution", + "output": "fixed num folds evolution config.toml: Specify the fixed number of cross-validation folds (if >= 2) for feature evolution. (The actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fixed_num_folds_evolution", + "output": "fixed num folds evolution config.toml: Number of cross-validation folds for feature evolution (-1 = auto): Specify the fixed number of cross-validation folds (if >= 2) for feature evolution. (The actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "plain", + "instruction": ": What does fixed_num_folds do? : fixed num folds config.toml: Specify the fixed number of cross-validation folds (if >= 2) for the final model. 
(The actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fixed_num_folds. : fixed num folds config.toml: Specify the fixed number of cross-validation folds (if >= 2) for the final model. (The actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of cross-validation folds for final model (-1 = auto): . : Set the fixed num folds config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_num_folds", + "output": "fixed num folds config.toml: Specify the fixed number of cross-validation folds (if >= 2) for the final model. (The actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_num_folds", + "output": "fixed num folds config.toml: Number of cross-validation folds for final model (-1 = auto): Specify the fixed number of cross-validation folds (if >= 2) for the final model. (The actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed num folds", + "output": "fixed num folds config.toml: Number of cross-validation folds for final model (-1 = auto): Specify the fixed number of cross-validation folds (if >= 2) for the final model. (The actual number of splits allowed can be less and is determined at experiment run-time)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of cross-validation folds for final model (-1 = auto): ", + "output": "fixed num folds config.toml: Number of cross-validation folds for final model (-1 = auto): Specify the fixed number of cross-validation folds (if >= 2) for the final model. (The actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fixed_num_folds", + "output": "fixed num folds config.toml: Specify the fixed number of cross-validation folds (if >= 2) for the final model. (The actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fixed_num_folds", + "output": "fixed num folds config.toml: Number of cross-validation folds for final model (-1 = auto): Specify the fixed number of cross-validation folds (if >= 2) for the final model. (The actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "plain", + "instruction": ": What does fixed_only_first_fold_model do? : fixed only first fold model config.toml: set \"on\" to force only first fold for models - useful for quick runs regardless of data" + }, + { + "prompt_type": "plain", + "instruction": ": Explain fixed_only_first_fold_model. : fixed only first fold model config.toml: set \"on\" to force only first fold for models - useful for quick runs regardless of data" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Force only first fold for models: . 
: Set the fixed only first fold model config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_only_first_fold_model", + "output": "fixed only first fold model config.toml: set \"on\" to force only first fold for models - useful for quick runs regardless of data" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_only_first_fold_model", + "output": "fixed only first fold model config.toml: Force only first fold for models: set \"on\" to force only first fold for models - useful for quick runs regardless of data" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed only first fold model", + "output": "fixed only first fold model config.toml: Force only first fold for models: set \"on\" to force only first fold for models - useful for quick runs regardless of data" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Force only first fold for models: ", + "output": "fixed only first fold model config.toml: Force only first fold for models: set \"on\" to force only first fold for models - useful for quick runs regardless of data" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fixed_only_first_fold_model", + "output": "fixed only first fold model config.toml: set \"on\" to force only first fold for models - useful for quick runs regardless of data" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fixed_only_first_fold_model", + "output": "fixed only first fold model config.toml: Force only first fold for models: set \"on\" to force only first fold for models - useful for quick runs regardless of data" + }, + { + "prompt_type": "plain", 
+ "instruction": ": What does fixed_fold_reps do? : fixed fold reps config.toml: Set the number of repeated cross-validation folds for feature evolution and final models (if > 0), 0 is default. Only for ensembles that do cross-validation (so no external validation and not time-series), not for single final models." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fixed_fold_reps. : fixed fold reps config.toml: Set the number of repeated cross-validation folds for feature evolution and final models (if > 0), 0 is default. Only for ensembles that do cross-validation (so no external validation and not time-series), not for single final models." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of repeated cross-validation folds. 0 is auto.: . : Set the fixed fold reps config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_fold_reps", + "output": "fixed fold reps config.toml: Set the number of repeated cross-validation folds for feature evolution and final models (if > 0), 0 is default. Only for ensembles that do cross-validation (so no external validation and not time-series), not for single final models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_fold_reps", + "output": "fixed fold reps config.toml: Number of repeated cross-validation folds. 0 is auto.: Set the number of repeated cross-validation folds for feature evolution and final models (if > 0), 0 is default. Only for ensembles that do cross-validation (so no external validation and not time-series), not for single final models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed fold reps", + "output": "fixed fold reps config.toml: Number of repeated cross-validation folds. 
0 is auto.: Set the number of repeated cross-validation folds for feature evolution and final models (if > 0), 0 is default. Only for ensembles that do cross-validation (so no external validation and not time-series), not for single final models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of repeated cross-validation folds. 0 is auto.: ", + "output": "fixed fold reps config.toml: Number of repeated cross-validation folds. 0 is auto.: Set the number of repeated cross-validation folds for feature evolution and final models (if > 0), 0 is default. Only for ensembles that do cross-validation (so no external validation and not time-series), not for single final models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fixed_fold_reps", + "output": "fixed fold reps config.toml: Set the number of repeated cross-validation folds for feature evolution and final models (if > 0), 0 is default. Only for ensembles that do cross-validation (so no external validation and not time-series), not for single final models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fixed_fold_reps", + "output": "fixed fold reps config.toml: Number of repeated cross-validation folds. 0 is auto.: Set the number of repeated cross-validation folds for feature evolution and final models (if > 0), 0 is default. Only for ensembles that do cross-validation (so no external validation and not time-series), not for single final models." + }, + { + "prompt_type": "plain", + "instruction": ": What does num_fold_ids_show do? : num fold ids show config.toml: Maximum number of fold IDs to show in logs: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_fold_ids_show. 
: num fold ids show config.toml: Maximum number of fold IDs to show in logs: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_fold_ids_show", + "output": "num fold ids show config.toml: Maximum number of fold IDs to show in logs: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_fold_ids_show", + "output": "num fold ids show config.toml: Maximum number of fold IDs to show in logs: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num fold ids show", + "output": "num fold ids show config.toml: Maximum number of fold IDs to show in logs: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of fold IDs to show in logs: ", + "output": "num fold ids show config.toml: Maximum number of fold IDs to show in logs: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_fold_ids_show", + "output": "num fold ids show config.toml: Maximum number of fold IDs to show in logs: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_fold_ids_show", + "output": "num fold ids show config.toml: Maximum number of fold IDs to show in logs: " + }, + { + "prompt_type": "plain", + "instruction": ": What does fold_scores_instability_warning_threshold do? : fold scores instability warning threshold config.toml: Declare positive fold scores as unstable if stddev / mean is larger than this value: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain fold_scores_instability_warning_threshold. 
: fold scores instability warning threshold config.toml: Declare positive fold scores as unstable if stddev / mean is larger than this value: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fold_scores_instability_warning_threshold", + "output": "fold scores instability warning threshold config.toml: Declare positive fold scores as unstable if stddev / mean is larger than this value: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fold_scores_instability_warning_threshold", + "output": "fold scores instability warning threshold config.toml: Declare positive fold scores as unstable if stddev / mean is larger than this value: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fold scores instability warning threshold", + "output": "fold scores instability warning threshold config.toml: Declare positive fold scores as unstable if stddev / mean is larger than this value: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Declare positive fold scores as unstable if stddev / mean is larger than this value: ", + "output": "fold scores instability warning threshold config.toml: Declare positive fold scores as unstable if stddev / mean is larger than this value: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fold_scores_instability_warning_threshold", + "output": "fold scores instability warning threshold config.toml: Declare positive fold scores as unstable if stddev / mean is larger than this value: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fold_scores_instability_warning_threshold", + "output": "fold scores instability warning 
threshold config.toml: Declare positive fold scores as unstable if stddev / mean is larger than this value: " + }, + { + "prompt_type": "plain", + "instruction": ": What does feature_evolution_data_size do? : feature evolution data size config.toml: Upper limit on the number of rows x number of columns for feature evolution (applies to both training and validation/holdout splits)feature evolution is the process that determines which features will be derived.Depending on accuracy settings, a fraction of this value will be used " + }, + { + "prompt_type": "plain", + "instruction": ": Explain feature_evolution_data_size. : feature evolution data size config.toml: Upper limit on the number of rows x number of columns for feature evolution (applies to both training and validation/holdout splits)feature evolution is the process that determines which features will be derived.Depending on accuracy settings, a fraction of this value will be used " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. num. of rows x num. of columns for feature evolution data splits (not for final pipeline): . : Set the feature evolution data size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_evolution_data_size", + "output": "feature evolution data size config.toml: Upper limit on the number of rows x number of columns for feature evolution (applies to both training and validation/holdout splits)feature evolution is the process that determines which features will be derived.Depending on accuracy settings, a fraction of this value will be used " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_evolution_data_size", + "output": "feature evolution data size config.toml: Max. num. of rows x num. 
of columns for feature evolution data splits (not for final pipeline): Upper limit on the number of rows x number of columns for feature evolution (applies to both training and validation/holdout splits)feature evolution is the process that determines which features will be derived.Depending on accuracy settings, a fraction of this value will be used " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature evolution data size", + "output": "feature evolution data size config.toml: Max. num. of rows x num. of columns for feature evolution data splits (not for final pipeline): Upper limit on the number of rows x number of columns for feature evolution (applies to both training and validation/holdout splits)feature evolution is the process that determines which features will be derived.Depending on accuracy settings, a fraction of this value will be used " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. num. of rows x num. of columns for feature evolution data splits (not for final pipeline): ", + "output": "feature evolution data size config.toml: Max. num. of rows x num. 
of columns for feature evolution data splits (not for final pipeline): Upper limit on the number of rows x number of columns for feature evolution (applies to both training and validation/holdout splits)feature evolution is the process that determines which features will be derived.Depending on accuracy settings, a fraction of this value will be used " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting feature_evolution_data_size", + "output": "feature evolution data size config.toml: Upper limit on the number of rows x number of columns for feature evolution (applies to both training and validation/holdout splits)feature evolution is the process that determines which features will be derived.Depending on accuracy settings, a fraction of this value will be used " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting feature_evolution_data_size", + "output": "feature evolution data size config.toml: Max. num. of rows x num. of columns for feature evolution data splits (not for final pipeline): Upper limit on the number of rows x number of columns for feature evolution (applies to both training and validation/holdout splits)feature evolution is the process that determines which features will be derived.Depending on accuracy settings, a fraction of this value will be used " + }, + { + "prompt_type": "plain", + "instruction": ": What does final_pipeline_data_size do? : final pipeline data size config.toml: Upper limit on the number of rows x number of columns for training final pipeline. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain final_pipeline_data_size. : final pipeline data size config.toml: Upper limit on the number of rows x number of columns for training final pipeline. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. num. of rows x num. 
of columns for reducing training data set (for final pipeline): . : Set the final pipeline data size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "final_pipeline_data_size", + "output": "final pipeline data size config.toml: Upper limit on the number of rows x number of columns for training final pipeline. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "final_pipeline_data_size", + "output": "final pipeline data size config.toml: Max. num. of rows x num. of columns for reducing training data set (for final pipeline): Upper limit on the number of rows x number of columns for training final pipeline. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "final pipeline data size", + "output": "final pipeline data size config.toml: Max. num. of rows x num. of columns for reducing training data set (for final pipeline): Upper limit on the number of rows x number of columns for training final pipeline. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. num. of rows x num. of columns for reducing training data set (for final pipeline): ", + "output": "final pipeline data size config.toml: Max. num. of rows x num. of columns for reducing training data set (for final pipeline): Upper limit on the number of rows x number of columns for training final pipeline. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting final_pipeline_data_size", + "output": "final pipeline data size config.toml: Upper limit on the number of rows x number of columns for training final pipeline. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting final_pipeline_data_size", + "output": "final pipeline data size config.toml: Max. num. of rows x num. of columns for reducing training data set (for final pipeline): Upper limit on the number of rows x number of columns for training final pipeline. " + }, + { + "prompt_type": "plain", + "instruction": ": What does limit_validation_size do? : limit validation size config.toml: Whether to automatically limit validation data size using feature_evolution_data_size (giving max_rows_feature_evolution shown in logs) for tuning-evolution, and using final_pipeline_data_size, max_validation_to_training_size_ratio_for_final_ensemble for final model." + }, + { + "prompt_type": "plain", + "instruction": ": Explain limit_validation_size. : limit validation size config.toml: Whether to automatically limit validation data size using feature_evolution_data_size (giving max_rows_feature_evolution shown in logs) for tuning-evolution, and using final_pipeline_data_size, max_validation_to_training_size_ratio_for_final_ensemble for final model." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Limit validation size: . : Set the limit validation size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "limit_validation_size", + "output": "limit validation size config.toml: Whether to automatically limit validation data size using feature_evolution_data_size (giving max_rows_feature_evolution shown in logs) for tuning-evolution, and using final_pipeline_data_size, max_validation_to_training_size_ratio_for_final_ensemble for final model." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "limit_validation_size", + "output": "limit validation size config.toml: Limit validation size: Whether to automatically limit validation data size using feature_evolution_data_size (giving max_rows_feature_evolution shown in logs) for tuning-evolution, and using final_pipeline_data_size, max_validation_to_training_size_ratio_for_final_ensemble for final model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "limit validation size", + "output": "limit validation size config.toml: Limit validation size: Whether to automatically limit validation data size using feature_evolution_data_size (giving max_rows_feature_evolution shown in logs) for tuning-evolution, and using final_pipeline_data_size, max_validation_to_training_size_ratio_for_final_ensemble for final model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Limit validation size: ", + "output": "limit validation size config.toml: Limit validation size: Whether to automatically limit validation data size using feature_evolution_data_size (giving max_rows_feature_evolution shown in logs) for tuning-evolution, and using final_pipeline_data_size, max_validation_to_training_size_ratio_for_final_ensemble for final model." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting limit_validation_size", + "output": "limit validation size config.toml: Whether to automatically limit validation data size using feature_evolution_data_size (giving max_rows_feature_evolution shown in logs) for tuning-evolution, and using final_pipeline_data_size, max_validation_to_training_size_ratio_for_final_ensemble for final model." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting limit_validation_size", + "output": "limit validation size config.toml: Limit validation size: Whether to automatically limit validation data size using feature_evolution_data_size (giving max_rows_feature_evolution shown in logs) for tuning-evolution, and using final_pipeline_data_size, max_validation_to_training_size_ratio_for_final_ensemble for final model." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_validation_to_training_size_ratio_for_final_ensemble do? : max validation to training size ratio for final ensemble config.toml: Smaller values can speed up final pipeline model training, as validation data is only used for early stopping.Note that final model predictions and scores will always be provided on the full dataset provided. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_validation_to_training_size_ratio_for_final_ensemble. : max validation to training size ratio for final ensemble config.toml: Smaller values can speed up final pipeline model training, as validation data is only used for early stopping.Note that final model predictions and scores will always be provided on the full dataset provided. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. size of validation data relative to training data (for final pipeline), otherwise will sample: . 
: Set the max validation to training size ratio for final ensemble config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_validation_to_training_size_ratio_for_final_ensemble", + "output": "max validation to training size ratio for final ensemble config.toml: Smaller values can speed up final pipeline model training, as validation data is only used for early stopping.Note that final model predictions and scores will always be provided on the full dataset provided. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_validation_to_training_size_ratio_for_final_ensemble", + "output": "max validation to training size ratio for final ensemble config.toml: Max. size of validation data relative to training data (for final pipeline), otherwise will sample: Smaller values can speed up final pipeline model training, as validation data is only used for early stopping.Note that final model predictions and scores will always be provided on the full dataset provided. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max validation to training size ratio for final ensemble", + "output": "max validation to training size ratio for final ensemble config.toml: Max. size of validation data relative to training data (for final pipeline), otherwise will sample: Smaller values can speed up final pipeline model training, as validation data is only used for early stopping.Note that final model predictions and scores will always be provided on the full dataset provided. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. 
size of validation data relative to training data (for final pipeline), otherwise will sample: ", + "output": "max validation to training size ratio for final ensemble config.toml: Max. size of validation data relative to training data (for final pipeline), otherwise will sample: Smaller values can speed up final pipeline model training, as validation data is only used for early stopping.Note that final model predictions and scores will always be provided on the full dataset provided. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_validation_to_training_size_ratio_for_final_ensemble", + "output": "max validation to training size ratio for final ensemble config.toml: Smaller values can speed up final pipeline model training, as validation data is only used for early stopping.Note that final model predictions and scores will always be provided on the full dataset provided. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_validation_to_training_size_ratio_for_final_ensemble", + "output": "max validation to training size ratio for final ensemble config.toml: Max. size of validation data relative to training data (for final pipeline), otherwise will sample: Smaller values can speed up final pipeline model training, as validation data is only used for early stopping.Note that final model predictions and scores will always be provided on the full dataset provided. " + }, + { + "prompt_type": "plain", + "instruction": ": What does force_stratified_splits_for_imbalanced_threshold_binary do? : force stratified splits for imbalanced threshold binary config.toml: Ratio of minority to majority class of the target column beyond which stratified sampling is done for binary classification. Otherwise perform random sampling. Set to 0 to always do random sampling. Set to 1 to always do stratified sampling." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain force_stratified_splits_for_imbalanced_threshold_binary. : force stratified splits for imbalanced threshold binary config.toml: Ratio of minority to majority class of the target column beyond which stratified sampling is done for binary classification. Otherwise perform random sampling. Set to 0 to always do random sampling. Set to 1 to always do stratified sampling." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Perform stratified sampling for binary classification if the target is more imbalanced than this.: . : Set the force stratified splits for imbalanced threshold binary config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "force_stratified_splits_for_imbalanced_threshold_binary", + "output": "force stratified splits for imbalanced threshold binary config.toml: Ratio of minority to majority class of the target column beyond which stratified sampling is done for binary classification. Otherwise perform random sampling. Set to 0 to always do random sampling. Set to 1 to always do stratified sampling." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "force_stratified_splits_for_imbalanced_threshold_binary", + "output": "force stratified splits for imbalanced threshold binary config.toml: Perform stratified sampling for binary classification if the target is more imbalanced than this.: Ratio of minority to majority class of the target column beyond which stratified sampling is done for binary classification. Otherwise perform random sampling. Set to 0 to always do random sampling. Set to 1 to always do stratified sampling." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "force stratified splits for imbalanced threshold binary", + "output": "force stratified splits for imbalanced threshold binary config.toml: Perform stratified sampling for binary classification if the target is more imbalanced than this.: Ratio of minority to majority class of the target column beyond which stratified sampling is done for binary classification. Otherwise perform random sampling. Set to 0 to always do random sampling. Set to 1 to always do stratified sampling." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Perform stratified sampling for binary classification if the target is more imbalanced than this.: ", + "output": "force stratified splits for imbalanced threshold binary config.toml: Perform stratified sampling for binary classification if the target is more imbalanced than this.: Ratio of minority to majority class of the target column beyond which stratified sampling is done for binary classification. Otherwise perform random sampling. Set to 0 to always do random sampling. Set to 1 to always do stratified sampling." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting force_stratified_splits_for_imbalanced_threshold_binary", + "output": "force stratified splits for imbalanced threshold binary config.toml: Ratio of minority to majority class of the target column beyond which stratified sampling is done for binary classification. Otherwise perform random sampling. Set to 0 to always do random sampling. Set to 1 to always do stratified sampling." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting force_stratified_splits_for_imbalanced_threshold_binary", + "output": "force stratified splits for imbalanced threshold binary config.toml: Perform stratified sampling for binary classification if the target is more imbalanced than this.: Ratio of minority to majority class of the target column beyond which stratified sampling is done for binary classification. Otherwise perform random sampling. Set to 0 to always do random sampling. Set to 1 to always do stratified sampling." + }, + { + "prompt_type": "plain", + "instruction": ": What does force_stratified_splits_for_binary_max_rows do? : force stratified splits for binary max rows config.toml: Perform stratified sampling for binary classification if the dataset has fewer rows than this.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain force_stratified_splits_for_binary_max_rows. : force stratified splits for binary max rows config.toml: Perform stratified sampling for binary classification if the dataset has fewer rows than this.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "force_stratified_splits_for_binary_max_rows", + "output": "force stratified splits for binary max rows config.toml: Perform stratified sampling for binary classification if the dataset has fewer rows than this.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "force_stratified_splits_for_binary_max_rows", + "output": "force stratified splits for binary max rows config.toml: Perform stratified sampling for binary classification if the dataset has fewer rows than this.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "force stratified splits for binary max rows", + "output": "force 
stratified splits for binary max rows config.toml: Perform stratified sampling for binary classification if the dataset has fewer rows than this.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Perform stratified sampling for binary classification if the dataset has fewer rows than this.: ", + "output": "force stratified splits for binary max rows config.toml: Perform stratified sampling for binary classification if the dataset has fewer rows than this.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting force_stratified_splits_for_binary_max_rows", + "output": "force stratified splits for binary max rows config.toml: Perform stratified sampling for binary classification if the dataset has fewer rows than this.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting force_stratified_splits_for_binary_max_rows", + "output": "force stratified splits for binary max rows config.toml: Perform stratified sampling for binary classification if the dataset has fewer rows than this.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does stratify_for_regression do? : stratify for regression config.toml: Specify whether to do stratified sampling for validation fold creation for iid regression problems. Otherwise perform random sampling." + }, + { + "prompt_type": "plain", + "instruction": ": Explain stratify_for_regression. : stratify for regression config.toml: Specify whether to do stratified sampling for validation fold creation for iid regression problems. Otherwise perform random sampling." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Perform stratified sampling for regression problems (using binning).: . 
: Set the stratify for regression config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stratify_for_regression", + "output": "stratify for regression config.toml: Specify whether to do stratified sampling for validation fold creation for iid regression problems. Otherwise perform random sampling." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stratify_for_regression", + "output": "stratify for regression config.toml: Perform stratified sampling for regression problems (using binning).: Specify whether to do stratified sampling for validation fold creation for iid regression problems. Otherwise perform random sampling." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stratify for regression", + "output": "stratify for regression config.toml: Perform stratified sampling for regression problems (using binning).: Specify whether to do stratified sampling for validation fold creation for iid regression problems. Otherwise perform random sampling." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Perform stratified sampling for regression problems (using binning).: ", + "output": "stratify for regression config.toml: Perform stratified sampling for regression problems (using binning).: Specify whether to do stratified sampling for validation fold creation for iid regression problems. Otherwise perform random sampling." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting stratify_for_regression", + "output": "stratify for regression config.toml: Specify whether to do stratified sampling for validation fold creation for iid regression problems. Otherwise perform random sampling." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting stratify_for_regression", + "output": "stratify for regression config.toml: Perform stratified sampling for regression problems (using binning).: Specify whether to do stratified sampling for validation fold creation for iid regression problems. Otherwise perform random sampling." + }, + { + "prompt_type": "plain", + "instruction": ": What does imbalance_sampling_method do? : imbalance sampling method config.toml: Sampling method for imbalanced binary classification problems. Choices are:\"auto\": sample both classes as needed, depending on data\"over_under_sampling\": over-sample the minority class and under-sample the majority class, depending on data\"under_sampling\": under-sample the majority class to reach class balance\"off\": do not perform any sampling " + }, + { + "prompt_type": "plain", + "instruction": ": Explain imbalance_sampling_method. : imbalance sampling method config.toml: Sampling method for imbalanced binary classification problems. Choices are:\"auto\": sample both classes as needed, depending on data\"over_under_sampling\": over-sample the minority class and under-sample the majority class, depending on data\"under_sampling\": under-sample the majority class to reach class balance\"off\": do not perform any sampling " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Sampling method for imbalanced binary classification problems: . : Set the imbalance sampling method config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_method", + "output": "imbalance sampling method config.toml: Sampling method for imbalanced binary classification problems. 
Choices are:\"auto\": sample both classes as needed, depending on data\"over_under_sampling\": over-sample the minority class and under-sample the majority class, depending on data\"under_sampling\": under-sample the majority class to reach class balance\"off\": do not perform any sampling " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_method", + "output": "imbalance sampling method config.toml: Sampling method for imbalanced binary classification problems: Sampling method for imbalanced binary classification problems. Choices are:\"auto\": sample both classes as needed, depending on data\"over_under_sampling\": over-sample the minority class and under-sample the majority class, depending on data\"under_sampling\": under-sample the majority class to reach class balance\"off\": do not perform any sampling " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance sampling method", + "output": "imbalance sampling method config.toml: Sampling method for imbalanced binary classification problems: Sampling method for imbalanced binary classification problems. Choices are:\"auto\": sample both classes as needed, depending on data\"over_under_sampling\": over-sample the minority class and under-sample the majority class, depending on data\"under_sampling\": under-sample the majority class to reach class balance\"off\": do not perform any sampling " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Sampling method for imbalanced binary classification problems: ", + "output": "imbalance sampling method config.toml: Sampling method for imbalanced binary classification problems: Sampling method for imbalanced binary classification problems. 
Choices are:\"auto\": sample both classes as needed, depending on data\"over_under_sampling\": over-sample the minority class and under-sample the majority class, depending on data\"under_sampling\": under-sample the majority class to reach class balance\"off\": do not perform any sampling " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting imbalance_sampling_method", + "output": "imbalance sampling method config.toml: Sampling method for imbalanced binary classification problems. Choices are:\"auto\": sample both classes as needed, depending on data\"over_under_sampling\": over-sample the minority class and under-sample the majority class, depending on data\"under_sampling\": under-sample the majority class to reach class balance\"off\": do not perform any sampling " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting imbalance_sampling_method", + "output": "imbalance sampling method config.toml: Sampling method for imbalanced binary classification problems: Sampling method for imbalanced binary classification problems. Choices are:\"auto\": sample both classes as needed, depending on data\"over_under_sampling\": over-sample the minority class and under-sample the majority class, depending on data\"under_sampling\": under-sample the majority class to reach class balance\"off\": do not perform any sampling " + }, + { + "prompt_type": "plain", + "instruction": ": What does imbalance_sampling_threshold_min_rows_original do? : imbalance sampling threshold min rows original config.toml: For smaller data, there's no generally no benefit in using imbalanced sampling methods." + }, + { + "prompt_type": "plain", + "instruction": ": Explain imbalance_sampling_threshold_min_rows_original. : imbalance sampling threshold min rows original config.toml: For smaller data, there's no generally no benefit in using imbalanced sampling methods." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Threshold for minimum number of rows in original training data to allow imbalanced sampling techniques. For smaller data, will disable imbalanced sampling, no matter what imbalance_sampling_method is set to.: . : Set the imbalance sampling threshold min rows original config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_threshold_min_rows_original", + "output": "imbalance sampling threshold min rows original config.toml: For smaller data, there's no generally no benefit in using imbalanced sampling methods." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_threshold_min_rows_original", + "output": "imbalance sampling threshold min rows original config.toml: Threshold for minimum number of rows in original training data to allow imbalanced sampling techniques. For smaller data, will disable imbalanced sampling, no matter what imbalance_sampling_method is set to.: For smaller data, there's no generally no benefit in using imbalanced sampling methods." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance sampling threshold min rows original", + "output": "imbalance sampling threshold min rows original config.toml: Threshold for minimum number of rows in original training data to allow imbalanced sampling techniques. For smaller data, will disable imbalanced sampling, no matter what imbalance_sampling_method is set to.: For smaller data, there's no generally no benefit in using imbalanced sampling methods." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Threshold for minimum number of rows in original training data to allow imbalanced sampling techniques. For smaller data, will disable imbalanced sampling, no matter what imbalance_sampling_method is set to.: ", + "output": "imbalance sampling threshold min rows original config.toml: Threshold for minimum number of rows in original training data to allow imbalanced sampling techniques. For smaller data, will disable imbalanced sampling, no matter what imbalance_sampling_method is set to.: For smaller data, there's no generally no benefit in using imbalanced sampling methods." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting imbalance_sampling_threshold_min_rows_original", + "output": "imbalance sampling threshold min rows original config.toml: For smaller data, there's no generally no benefit in using imbalanced sampling methods." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting imbalance_sampling_threshold_min_rows_original", + "output": "imbalance sampling threshold min rows original config.toml: Threshold for minimum number of rows in original training data to allow imbalanced sampling techniques. For smaller data, will disable imbalanced sampling, no matter what imbalance_sampling_method is set to.: For smaller data, there's no generally no benefit in using imbalanced sampling methods." + }, + { + "prompt_type": "plain", + "instruction": ": What does imbalance_ratio_sampling_threshold do? : imbalance ratio sampling threshold config.toml: For imbalanced binary classification: ratio of majority to minority class equal and above which to enablespecial imbalanced models with sampling techniques (specified by imbalance_sampling_method) to attempt to improve model performance. 
" + }, + { + "prompt_type": "plain", + "instruction": ": Explain imbalance_ratio_sampling_threshold. : imbalance ratio sampling threshold config.toml: For imbalanced binary classification: ratio of majority to minority class equal and above which to enablespecial imbalanced models with sampling techniques (specified by imbalance_sampling_method) to attempt to improve model performance. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Ratio of majority to minority class for imbalanced binary classification to trigger special sampling techniques if enabled: . : Set the imbalance ratio sampling threshold config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_ratio_sampling_threshold", + "output": "imbalance ratio sampling threshold config.toml: For imbalanced binary classification: ratio of majority to minority class equal and above which to enablespecial imbalanced models with sampling techniques (specified by imbalance_sampling_method) to attempt to improve model performance. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_ratio_sampling_threshold", + "output": "imbalance ratio sampling threshold config.toml: Ratio of majority to minority class for imbalanced binary classification to trigger special sampling techniques if enabled: For imbalanced binary classification: ratio of majority to minority class equal and above which to enablespecial imbalanced models with sampling techniques (specified by imbalance_sampling_method) to attempt to improve model performance. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance ratio sampling threshold", + "output": "imbalance ratio sampling threshold config.toml: Ratio of majority to minority class for imbalanced binary classification to trigger special sampling techniques if enabled: For imbalanced binary classification: ratio of majority to minority class equal and above which to enablespecial imbalanced models with sampling techniques (specified by imbalance_sampling_method) to attempt to improve model performance. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Ratio of majority to minority class for imbalanced binary classification to trigger special sampling techniques if enabled: ", + "output": "imbalance ratio sampling threshold config.toml: Ratio of majority to minority class for imbalanced binary classification to trigger special sampling techniques if enabled: For imbalanced binary classification: ratio of majority to minority class equal and above which to enablespecial imbalanced models with sampling techniques (specified by imbalance_sampling_method) to attempt to improve model performance. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting imbalance_ratio_sampling_threshold", + "output": "imbalance ratio sampling threshold config.toml: For imbalanced binary classification: ratio of majority to minority class equal and above which to enablespecial imbalanced models with sampling techniques (specified by imbalance_sampling_method) to attempt to improve model performance. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting imbalance_ratio_sampling_threshold", + "output": "imbalance ratio sampling threshold config.toml: Ratio of majority to minority class for imbalanced binary classification to trigger special sampling techniques if enabled: For imbalanced binary classification: ratio of majority to minority class equal and above which to enablespecial imbalanced models with sampling techniques (specified by imbalance_sampling_method) to attempt to improve model performance. " + }, + { + "prompt_type": "plain", + "instruction": ": What does heavy_imbalance_ratio_sampling_threshold do? : heavy imbalance ratio sampling threshold config.toml: For heavily imbalanced binary classification: ratio of majority to minority class equal and above which to enable onlyspecial imbalanced models on full original data, without upfront sampling. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain heavy_imbalance_ratio_sampling_threshold. : heavy imbalance ratio sampling threshold config.toml: For heavily imbalanced binary classification: ratio of majority to minority class equal and above which to enable onlyspecial imbalanced models on full original data, without upfront sampling. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Ratio of majority to minority class for heavily imbalanced binary classification to only enable special sampling techniques if enabled: . : Set the heavy imbalance ratio sampling threshold config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "heavy_imbalance_ratio_sampling_threshold", + "output": "heavy imbalance ratio sampling threshold config.toml: For heavily imbalanced binary classification: ratio of majority to minority class equal and above which to enable onlyspecial imbalanced models on full original data, without upfront sampling. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "heavy_imbalance_ratio_sampling_threshold", + "output": "heavy imbalance ratio sampling threshold config.toml: Ratio of majority to minority class for heavily imbalanced binary classification to only enable special sampling techniques if enabled: For heavily imbalanced binary classification: ratio of majority to minority class equal and above which to enable onlyspecial imbalanced models on full original data, without upfront sampling. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "heavy imbalance ratio sampling threshold", + "output": "heavy imbalance ratio sampling threshold config.toml: Ratio of majority to minority class for heavily imbalanced binary classification to only enable special sampling techniques if enabled: For heavily imbalanced binary classification: ratio of majority to minority class equal and above which to enable onlyspecial imbalanced models on full original data, without upfront sampling. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Ratio of majority to minority class for heavily imbalanced binary classification to only enable special sampling techniques if enabled: ", + "output": "heavy imbalance ratio sampling threshold config.toml: Ratio of majority to minority class for heavily imbalanced binary classification to only enable special sampling techniques if enabled: For heavily imbalanced binary classification: ratio of majority to minority class equal and above which to enable onlyspecial imbalanced models on full original data, without upfront sampling. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting heavy_imbalance_ratio_sampling_threshold", + "output": "heavy imbalance ratio sampling threshold config.toml: For heavily imbalanced binary classification: ratio of majority to minority class equal and above which to enable onlyspecial imbalanced models on full original data, without upfront sampling. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting heavy_imbalance_ratio_sampling_threshold", + "output": "heavy imbalance ratio sampling threshold config.toml: Ratio of majority to minority class for heavily imbalanced binary classification to only enable special sampling techniques if enabled: For heavily imbalanced binary classification: ratio of majority to minority class equal and above which to enable onlyspecial imbalanced models on full original data, without upfront sampling. " + }, + { + "prompt_type": "plain", + "instruction": ": What does imbalance_ratio_multiclass_threshold do? : imbalance ratio multiclass threshold config.toml: Special handling can include special models, special scorers, special feature engineering. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain imbalance_ratio_multiclass_threshold. : imbalance ratio multiclass threshold config.toml: Special handling can include special models, special scorers, special feature engineering. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Ratio of most frequent to least frequent class for imbalanced multiclass classification problems equal and above which to trigger special handling due to class imbalance: . 
: Set the imbalance ratio multiclass threshold config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_ratio_multiclass_threshold", + "output": "imbalance ratio multiclass threshold config.toml: Special handling can include special models, special scorers, special feature engineering. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_ratio_multiclass_threshold", + "output": "imbalance ratio multiclass threshold config.toml: Ratio of most frequent to least frequent class for imbalanced multiclass classification problems equal and above which to trigger special handling due to class imbalance: Special handling can include special models, special scorers, special feature engineering. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance ratio multiclass threshold", + "output": "imbalance ratio multiclass threshold config.toml: Ratio of most frequent to least frequent class for imbalanced multiclass classification problems equal and above which to trigger special handling due to class imbalance: Special handling can include special models, special scorers, special feature engineering. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Ratio of most frequent to least frequent class for imbalanced multiclass classification problems equal and above which to trigger special handling due to class imbalance: ", + "output": "imbalance ratio multiclass threshold config.toml: Ratio of most frequent to least frequent class for imbalanced multiclass classification problems equal and above which to trigger special handling due to class imbalance: Special handling can include special models, special scorers, special feature engineering. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting imbalance_ratio_multiclass_threshold", + "output": "imbalance ratio multiclass threshold config.toml: Special handling can include special models, special scorers, special feature engineering. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting imbalance_ratio_multiclass_threshold", + "output": "imbalance ratio multiclass threshold config.toml: Ratio of most frequent to least frequent class for imbalanced multiclass classification problems equal and above which to trigger special handling due to class imbalance: Special handling can include special models, special scorers, special feature engineering. " + }, + { + "prompt_type": "plain", + "instruction": ": What does heavy_imbalance_ratio_multiclass_threshold do? : heavy imbalance ratio multiclass threshold config.toml: Special handling can include special models, special scorers, special feature engineering. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain heavy_imbalance_ratio_multiclass_threshold. : heavy imbalance ratio multiclass threshold config.toml: Special handling can include special models, special scorers, special feature engineering. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Ratio of most frequent to least frequent class for imbalanced multiclass classification problems equal and above which to trigger special handling due to heavy class imbalance: . : Set the heavy imbalance ratio multiclass threshold config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "heavy_imbalance_ratio_multiclass_threshold", + "output": "heavy imbalance ratio multiclass threshold config.toml: Special handling can include special models, special scorers, special feature engineering. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "heavy_imbalance_ratio_multiclass_threshold", + "output": "heavy imbalance ratio multiclass threshold config.toml: Ratio of most frequent to least frequent class for imbalanced multiclass classification problems equal and above which to trigger special handling due to heavy class imbalance: Special handling can include special models, special scorers, special feature engineering. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "heavy imbalance ratio multiclass threshold", + "output": "heavy imbalance ratio multiclass threshold config.toml: Ratio of most frequent to least frequent class for imbalanced multiclass classification problems equal and above which to trigger special handling due to heavy class imbalance: Special handling can include special models, special scorers, special feature engineering. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Ratio of most frequent to least frequent class for imbalanced multiclass classification problems equal and above which to trigger special handling due to heavy class imbalance: ", + "output": "heavy imbalance ratio multiclass threshold config.toml: Ratio of most frequent to least frequent class for imbalanced multiclass classification problems equal and above which to trigger special handling due to heavy class imbalance: Special handling can include special models, special scorers, special feature engineering. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting heavy_imbalance_ratio_multiclass_threshold", + "output": "heavy imbalance ratio multiclass threshold config.toml: Special handling can include special models, special scorers, special feature engineering. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting heavy_imbalance_ratio_multiclass_threshold", + "output": "heavy imbalance ratio multiclass threshold config.toml: Ratio of most frequent to least frequent class for imbalanced multiclass classification problems equal and above which to trigger special handling due to heavy class imbalance: Special handling can include special models, special scorers, special feature engineering. " + }, + { + "prompt_type": "plain", + "instruction": ": What does imbalance_sampling_number_of_bags do? : imbalance sampling number of bags config.toml: -1: automatic" + }, + { + "prompt_type": "plain", + "instruction": ": Explain imbalance_sampling_number_of_bags. : imbalance sampling number of bags config.toml: -1: automatic" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of bags for sampling methods for imbalanced binary classification (if enabled). -1 for automatic.: . : Set the imbalance sampling number of bags config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_number_of_bags", + "output": "imbalance sampling number of bags config.toml: -1: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_number_of_bags", + "output": "imbalance sampling number of bags config.toml: Number of bags for sampling methods for imbalanced binary classification (if enabled). -1 for automatic.: -1: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance sampling number of bags", + "output": "imbalance sampling number of bags config.toml: Number of bags for sampling methods for imbalanced binary classification (if enabled). 
-1 for automatic.: -1: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of bags for sampling methods for imbalanced binary classification (if enabled). -1 for automatic.: ", + "output": "imbalance sampling number of bags config.toml: Number of bags for sampling methods for imbalanced binary classification (if enabled). -1 for automatic.: -1: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting imbalance_sampling_number_of_bags", + "output": "imbalance sampling number of bags config.toml: -1: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting imbalance_sampling_number_of_bags", + "output": "imbalance sampling number of bags config.toml: Number of bags for sampling methods for imbalanced binary classification (if enabled). -1 for automatic.: -1: automatic" + }, + { + "prompt_type": "plain", + "instruction": ": What does imbalance_sampling_max_number_of_bags do? : imbalance sampling max number of bags config.toml: -1: automatic" + }, + { + "prompt_type": "plain", + "instruction": ": Explain imbalance_sampling_max_number_of_bags. : imbalance sampling max number of bags config.toml: -1: automatic" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Hard limit on number of bags for sampling methods for imbalanced binary classification.: . 
: Set the imbalance sampling max number of bags config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_max_number_of_bags", + "output": "imbalance sampling max number of bags config.toml: -1: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_max_number_of_bags", + "output": "imbalance sampling max number of bags config.toml: Hard limit on number of bags for sampling methods for imbalanced binary classification.: -1: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance sampling max number of bags", + "output": "imbalance sampling max number of bags config.toml: Hard limit on number of bags for sampling methods for imbalanced binary classification.: -1: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Hard limit on number of bags for sampling methods for imbalanced binary classification.: ", + "output": "imbalance sampling max number of bags config.toml: Hard limit on number of bags for sampling methods for imbalanced binary classification.: -1: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting imbalance_sampling_max_number_of_bags", + "output": "imbalance sampling max number of bags config.toml: -1: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting imbalance_sampling_max_number_of_bags", + "output": "imbalance sampling max number of bags config.toml: Hard limit on number of bags for sampling methods for imbalanced binary classification.: -1: automatic" + }, + { + "prompt_type": "plain", + "instruction": ": What does 
imbalance_sampling_max_number_of_bags_feature_evolution do? : imbalance sampling max number of bags feature evolution config.toml: Only for shift/leakage/tuning/feature evolution models. Not used for final models. Final models can be limited by imbalance_sampling_max_number_of_bags." + }, + { + "prompt_type": "plain", + "instruction": ": Explain imbalance_sampling_max_number_of_bags_feature_evolution. : imbalance sampling max number of bags feature evolution config.toml: Only for shift/leakage/tuning/feature evolution models. Not used for final models. Final models can be limited by imbalance_sampling_max_number_of_bags." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Hard limit on number of bags for sampling methods for imbalanced binary classification during feature evolution phase.: . : Set the imbalance sampling max number of bags feature evolution config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_max_number_of_bags_feature_evolution", + "output": "imbalance sampling max number of bags feature evolution config.toml: Only for shift/leakage/tuning/feature evolution models. Not used for final models. Final models can be limited by imbalance_sampling_max_number_of_bags." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_max_number_of_bags_feature_evolution", + "output": "imbalance sampling max number of bags feature evolution config.toml: Hard limit on number of bags for sampling methods for imbalanced binary classification during feature evolution phase.: Only for shift/leakage/tuning/feature evolution models. Not used for final models. Final models can be limited by imbalance_sampling_max_number_of_bags." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance sampling max number of bags feature evolution", + "output": "imbalance sampling max number of bags feature evolution config.toml: Hard limit on number of bags for sampling methods for imbalanced binary classification during feature evolution phase.: Only for shift/leakage/tuning/feature evolution models. Not used for final models. Final models can be limited by imbalance_sampling_max_number_of_bags." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Hard limit on number of bags for sampling methods for imbalanced binary classification during feature evolution phase.: ", + "output": "imbalance sampling max number of bags feature evolution config.toml: Hard limit on number of bags for sampling methods for imbalanced binary classification during feature evolution phase.: Only for shift/leakage/tuning/feature evolution models. Not used for final models. Final models can be limited by imbalance_sampling_max_number_of_bags." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting imbalance_sampling_max_number_of_bags_feature_evolution", + "output": "imbalance sampling max number of bags feature evolution config.toml: Only for shift/leakage/tuning/feature evolution models. Not used for final models. Final models can be limited by imbalance_sampling_max_number_of_bags." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting imbalance_sampling_max_number_of_bags_feature_evolution", + "output": "imbalance sampling max number of bags feature evolution config.toml: Hard limit on number of bags for sampling methods for imbalanced binary classification during feature evolution phase.: Only for shift/leakage/tuning/feature evolution models. Not used for final models. 
Final models can be limited by imbalance_sampling_max_number_of_bags." + }, + { + "prompt_type": "plain", + "instruction": ": What does imbalance_sampling_max_multiple_data_size do? : imbalance sampling max multiple data size config.toml: Max. size of data sampled during imbalanced sampling (in terms of dataset size), controls number of bags (approximately). Only for imbalance_sampling_number_of_bags == -1." + }, + { + "prompt_type": "plain", + "instruction": ": Explain imbalance_sampling_max_multiple_data_size. : imbalance sampling max multiple data size config.toml: Max. size of data sampled during imbalanced sampling (in terms of dataset size), controls number of bags (approximately). Only for imbalance_sampling_number_of_bags == -1." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. size of data sampled during imbalanced sampling (in terms of dataset size): . : Set the imbalance sampling max multiple data size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_max_multiple_data_size", + "output": "imbalance sampling max multiple data size config.toml: Max. size of data sampled during imbalanced sampling (in terms of dataset size), controls number of bags (approximately). Only for imbalance_sampling_number_of_bags == -1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_max_multiple_data_size", + "output": "imbalance sampling max multiple data size config.toml: Max. size of data sampled during imbalanced sampling (in terms of dataset size): Max. size of data sampled during imbalanced sampling (in terms of dataset size), controls number of bags (approximately). Only for imbalance_sampling_number_of_bags == -1." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance sampling max multiple data size", + "output": "imbalance sampling max multiple data size config.toml: Max. size of data sampled during imbalanced sampling (in terms of dataset size): Max. size of data sampled during imbalanced sampling (in terms of dataset size), controls number of bags (approximately). Only for imbalance_sampling_number_of_bags == -1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. size of data sampled during imbalanced sampling (in terms of dataset size): ", + "output": "imbalance sampling max multiple data size config.toml: Max. size of data sampled during imbalanced sampling (in terms of dataset size): Max. size of data sampled during imbalanced sampling (in terms of dataset size), controls number of bags (approximately). Only for imbalance_sampling_number_of_bags == -1." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting imbalance_sampling_max_multiple_data_size", + "output": "imbalance sampling max multiple data size config.toml: Max. size of data sampled during imbalanced sampling (in terms of dataset size), controls number of bags (approximately). Only for imbalance_sampling_number_of_bags == -1." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting imbalance_sampling_max_multiple_data_size", + "output": "imbalance sampling max multiple data size config.toml: Max. size of data sampled during imbalanced sampling (in terms of dataset size): Max. size of data sampled during imbalanced sampling (in terms of dataset size), controls number of bags (approximately). Only for imbalance_sampling_number_of_bags == -1." + }, + { + "prompt_type": "plain", + "instruction": ": What does imbalance_sampling_rank_averaging do? 
: imbalance sampling rank averaging config.toml: Rank averaging can be helpful when ensembling diverse models when ranking metrics like AUC/Gini metrics are optimized. No MOJO support yet." + }, + { + "prompt_type": "plain", + "instruction": ": Explain imbalance_sampling_rank_averaging. : imbalance sampling rank averaging config.toml: Rank averaging can be helpful when ensembling diverse models when ranking metrics like AUC/Gini metrics are optimized. No MOJO support yet." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to do rank averaging bagged models inside of imbalanced models, instead of probability averaging: . : Set the imbalance sampling rank averaging config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_rank_averaging", + "output": "imbalance sampling rank averaging config.toml: Rank averaging can be helpful when ensembling diverse models when ranking metrics like AUC/Gini metrics are optimized. No MOJO support yet." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_rank_averaging", + "output": "imbalance sampling rank averaging config.toml: Whether to do rank averaging bagged models inside of imbalanced models, instead of probability averaging: Rank averaging can be helpful when ensembling diverse models when ranking metrics like AUC/Gini metrics are optimized. No MOJO support yet." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance sampling rank averaging", + "output": "imbalance sampling rank averaging config.toml: Whether to do rank averaging bagged models inside of imbalanced models, instead of probability averaging: Rank averaging can be helpful when ensembling diverse models when ranking metrics like AUC/Gini metrics are optimized. 
No MOJO support yet." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to do rank averaging bagged models inside of imbalanced models, instead of probability averaging: ", + "output": "imbalance sampling rank averaging config.toml: Whether to do rank averaging bagged models inside of imbalanced models, instead of probability averaging: Rank averaging can be helpful when ensembling diverse models when ranking metrics like AUC/Gini metrics are optimized. No MOJO support yet." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting imbalance_sampling_rank_averaging", + "output": "imbalance sampling rank averaging config.toml: Rank averaging can be helpful when ensembling diverse models when ranking metrics like AUC/Gini metrics are optimized. No MOJO support yet." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting imbalance_sampling_rank_averaging", + "output": "imbalance sampling rank averaging config.toml: Whether to do rank averaging bagged models inside of imbalanced models, instead of probability averaging: Rank averaging can be helpful when ensembling diverse models when ranking metrics like AUC/Gini metrics are optimized. No MOJO support yet." + }, + { + "prompt_type": "plain", + "instruction": ": What does imbalance_sampling_target_minority_fraction do? : imbalance sampling target minority fraction config.toml: A value of 0.5 means that models/algorithms will be presented a balanced target class distribution after applying under/over-sampling techniques on the training data. Sometimes it makes sense to choose a smaller value like 0.1 or 0.01 when starting from an extremely imbalanced original target distribution. -1.0: automatic" + }, + { + "prompt_type": "plain", + "instruction": ": Explain imbalance_sampling_target_minority_fraction. 
: imbalance sampling target minority fraction config.toml: A value of 0.5 means that models/algorithms will be presented a balanced target class distribution after applying under/over-sampling techniques on the training data. Sometimes it makes sense to choose a smaller value like 0.1 or 0.01 when starting from an extremely imbalanced original target distribution. -1.0: automatic" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Target fraction of minority class after applying under/over-sampling techniques. -1.0 for automatic: . : Set the imbalance sampling target minority fraction config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_target_minority_fraction", + "output": "imbalance sampling target minority fraction config.toml: A value of 0.5 means that models/algorithms will be presented a balanced target class distribution after applying under/over-sampling techniques on the training data. Sometimes it makes sense to choose a smaller value like 0.1 or 0.01 when starting from an extremely imbalanced original target distribution. -1.0: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_sampling_target_minority_fraction", + "output": "imbalance sampling target minority fraction config.toml: Target fraction of minority class after applying under/over-sampling techniques. -1.0 for automatic: A value of 0.5 means that models/algorithms will be presented a balanced target class distribution after applying under/over-sampling techniques on the training data. Sometimes it makes sense to choose a smaller value like 0.1 or 0.01 when starting from an extremely imbalanced original target distribution. 
-1.0: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance sampling target minority fraction", + "output": "imbalance sampling target minority fraction config.toml: Target fraction of minority class after applying under/over-sampling techniques. -1.0 for automatic: A value of 0.5 means that models/algorithms will be presented a balanced target class distribution after applying under/over-sampling techniques on the training data. Sometimes it makes sense to choose a smaller value like 0.1 or 0.01 when starting from an extremely imbalanced original target distribution. -1.0: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Target fraction of minority class after applying under/over-sampling techniques. -1.0 for automatic: ", + "output": "imbalance sampling target minority fraction config.toml: Target fraction of minority class after applying under/over-sampling techniques. -1.0 for automatic: A value of 0.5 means that models/algorithms will be presented a balanced target class distribution after applying under/over-sampling techniques on the training data. Sometimes it makes sense to choose a smaller value like 0.1 or 0.01 when starting from an extremely imbalanced original target distribution. -1.0: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting imbalance_sampling_target_minority_fraction", + "output": "imbalance sampling target minority fraction config.toml: A value of 0.5 means that models/algorithms will be presented a balanced target class distribution after applying under/over-sampling techniques on the training data. Sometimes it makes sense to choose a smaller value like 0.1 or 0.01 when starting from an extremely imbalanced original target distribution. 
-1.0: automatic" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting imbalance_sampling_target_minority_fraction", + "output": "imbalance sampling target minority fraction config.toml: Target fraction of minority class after applying under/over-sampling techniques. -1.0 for automatic: A value of 0.5 means that models/algorithms will be presented a balanced target class distribution after applying under/over-sampling techniques on the training data. Sometimes it makes sense to choose a smaller value like 0.1 or 0.01 when starting from an extremely imbalanced original target distribution. -1.0: automatic" + }, + { + "prompt_type": "plain", + "instruction": ": What does imbalance_ratio_notification_threshold do? : imbalance ratio notification threshold config.toml: For binary classification: ratio of majority to minority class equal and above which to notify of imbalance in GUI to say slightly imbalanced. More than ``imbalance_ratio_sampling_threshold`` will say problem is imbalanced. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain imbalance_ratio_notification_threshold. : imbalance ratio notification threshold config.toml: For binary classification: ratio of majority to minority class equal and above which to notify of imbalance in GUI to say slightly imbalanced. More than ``imbalance_ratio_sampling_threshold`` will say problem is imbalanced. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_ratio_notification_threshold", + "output": "imbalance ratio notification threshold config.toml: For binary classification: ratio of majority to minority class equal and above which to notify of imbalance in GUI to say slightly imbalanced. More than ``imbalance_ratio_sampling_threshold`` will say problem is imbalanced. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance_ratio_notification_threshold", + "output": "imbalance ratio notification threshold config.toml: For binary classification: ratio of majority to minority class equal and above which to notify of imbalance in GUI to say slightly imbalanced. More than ``imbalance_ratio_sampling_threshold`` will say problem is imbalanced. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "imbalance ratio notification threshold", + "output": "imbalance ratio notification threshold config.toml: For binary classification: ratio of majority to minority class equal and above which to notify of imbalance in GUI to say slightly imbalanced. More than ``imbalance_ratio_sampling_threshold`` will say problem is imbalanced. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "imbalance ratio notification threshold config.toml: For binary classification: ratio of majority to minority class equal and above which to notify of imbalance in GUI to say slightly imbalanced. More than ``imbalance_ratio_sampling_threshold`` will say problem is imbalanced. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting imbalance_ratio_notification_threshold", + "output": "imbalance ratio notification threshold config.toml: For binary classification: ratio of majority to minority class equal and above which to notify of imbalance in GUI to say slightly imbalanced. More than ``imbalance_ratio_sampling_threshold`` will say problem is imbalanced. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting imbalance_ratio_notification_threshold", + "output": "imbalance ratio notification threshold config.toml: For binary classification: ratio of majority to minority class equal and above which to notify of imbalance in GUI to say slightly imbalanced. More than ``imbalance_ratio_sampling_threshold`` will say problem is imbalanced. " + }, + { + "prompt_type": "plain", + "instruction": ": What does nbins_ftrl_list do? : nbins ftrl list config.toml: List of possible bins for FTRL (largest is default best value)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain nbins_ftrl_list. : nbins ftrl list config.toml: List of possible bins for FTRL (largest is default best value)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "nbins_ftrl_list", + "output": "nbins ftrl list config.toml: List of possible bins for FTRL (largest is default best value)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "nbins_ftrl_list", + "output": "nbins ftrl list config.toml: List of possible bins for FTRL (largest is default best value)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "nbins ftrl list", + "output": "nbins ftrl list config.toml: List of possible bins for FTRL (largest is default best value)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "nbins ftrl list config.toml: List of possible bins for FTRL (largest is default best value)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting nbins_ftrl_list", + "output": "nbins ftrl list config.toml: List of possible bins for FTRL (largest is 
default best value)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting nbins_ftrl_list", + "output": "nbins ftrl list config.toml: List of possible bins for FTRL (largest is default best value)" + }, + { + "prompt_type": "plain", + "instruction": ": What does ftrl_max_interaction_terms_per_degree do? : ftrl max interaction terms per degree config.toml: Samples the number of automatic FTRL interactions terms to no more than this value (for each of 2nd, 3rd, 4th order terms)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ftrl_max_interaction_terms_per_degree. : ftrl max interaction terms per degree config.toml: Samples the number of automatic FTRL interactions terms to no more than this value (for each of 2nd, 3rd, 4th order terms)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of automatic FTRL interactions terms for 2nd, 3rd, 4th order interactions terms (each): . : Set the ftrl max interaction terms per degree config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ftrl_max_interaction_terms_per_degree", + "output": "ftrl max interaction terms per degree config.toml: Samples the number of automatic FTRL interactions terms to no more than this value (for each of 2nd, 3rd, 4th order terms)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ftrl_max_interaction_terms_per_degree", + "output": "ftrl max interaction terms per degree config.toml: Max. 
number of automatic FTRL interactions terms for 2nd, 3rd, 4th order interactions terms (each): Samples the number of automatic FTRL interactions terms to no more than this value (for each of 2nd, 3rd, 4th order terms)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ftrl max interaction terms per degree", + "output": "ftrl max interaction terms per degree config.toml: Max. number of automatic FTRL interactions terms for 2nd, 3rd, 4th order interactions terms (each): Samples the number of automatic FTRL interactions terms to no more than this value (for each of 2nd, 3rd, 4th order terms)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of automatic FTRL interactions terms for 2nd, 3rd, 4th order interactions terms (each): ", + "output": "ftrl max interaction terms per degree config.toml: Max. number of automatic FTRL interactions terms for 2nd, 3rd, 4th order interactions terms (each): Samples the number of automatic FTRL interactions terms to no more than this value (for each of 2nd, 3rd, 4th order terms)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ftrl_max_interaction_terms_per_degree", + "output": "ftrl max interaction terms per degree config.toml: Samples the number of automatic FTRL interactions terms to no more than this value (for each of 2nd, 3rd, 4th order terms)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ftrl_max_interaction_terms_per_degree", + "output": "ftrl max interaction terms per degree config.toml: Max. 
number of automatic FTRL interactions terms for 2nd, 3rd, 4th order interactions terms (each): Samples the number of automatic FTRL interactions terms to no more than this value (for each of 2nd, 3rd, 4th order terms)" + }, + { + "prompt_type": "plain", + "instruction": ": What does te_bin_list do? : te bin list config.toml: List of possible bins for target encoding (first is default value)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain te_bin_list. : te bin list config.toml: List of possible bins for target encoding (first is default value)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "te_bin_list", + "output": "te bin list config.toml: List of possible bins for target encoding (first is default value)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "te_bin_list", + "output": "te bin list config.toml: List of possible bins for target encoding (first is default value)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "te bin list", + "output": "te bin list config.toml: List of possible bins for target encoding (first is default value)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "te bin list config.toml: List of possible bins for target encoding (first is default value)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting te_bin_list", + "output": "te bin list config.toml: List of possible bins for target encoding (first is default value)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting te_bin_list", + "output": "te bin list config.toml: List of possible bins for target encoding (first is default value)" + 
}, + { + "prompt_type": "plain", + "instruction": ": What does woe_bin_list do? : woe bin list config.toml: List of possible bins for weight of evidence encoding (first is default value) If only want one value: woe_bin_list = [2]" + }, + { + "prompt_type": "plain", + "instruction": ": Explain woe_bin_list. : woe bin list config.toml: List of possible bins for weight of evidence encoding (first is default value) If only want one value: woe_bin_list = [2]" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "woe_bin_list", + "output": "woe bin list config.toml: List of possible bins for weight of evidence encoding (first is default value) If only want one value: woe_bin_list = [2]" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "woe_bin_list", + "output": "woe bin list config.toml: List of possible bins for weight of evidence encoding (first is default value) If only want one value: woe_bin_list = [2]" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "woe bin list", + "output": "woe bin list config.toml: List of possible bins for weight of evidence encoding (first is default value) If only want one value: woe_bin_list = [2]" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "woe bin list config.toml: List of possible bins for weight of evidence encoding (first is default value) If only want one value: woe_bin_list = [2]" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting woe_bin_list", + "output": "woe bin list config.toml: List of possible bins for weight of evidence encoding (first is default value) If only want one value: woe_bin_list = [2]" + }, + { + "prompt_type": "human_bot", + "instruction": 
"Provide a detailed explanation of the expert setting woe_bin_list", + "output": "woe bin list config.toml: List of possible bins for weight of evidence encoding (first is default value) If only want one value: woe_bin_list = [2]" + }, + { + "prompt_type": "plain", + "instruction": ": What does ohe_bin_list do? : ohe bin list config.toml: List of possible bins for ohe hot encoding (first is default value). If left as default, the actual list is changed for given data size and dials." + }, + { + "prompt_type": "plain", + "instruction": ": Explain ohe_bin_list. : ohe bin list config.toml: List of possible bins for ohe hot encoding (first is default value). If left as default, the actual list is changed for given data size and dials." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ohe_bin_list", + "output": "ohe bin list config.toml: List of possible bins for ohe hot encoding (first is default value). If left as default, the actual list is changed for given data size and dials." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ohe_bin_list", + "output": "ohe bin list config.toml: List of possible bins for ohe hot encoding (first is default value). If left as default, the actual list is changed for given data size and dials." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ohe bin list", + "output": "ohe bin list config.toml: List of possible bins for ohe hot encoding (first is default value). If left as default, the actual list is changed for given data size and dials." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ohe bin list config.toml: List of possible bins for ohe hot encoding (first is default value). 
If left as default, the actual list is changed for given data size and dials." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ohe_bin_list", + "output": "ohe bin list config.toml: List of possible bins for ohe hot encoding (first is default value). If left as default, the actual list is changed for given data size and dials." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ohe_bin_list", + "output": "ohe bin list config.toml: List of possible bins for ohe hot encoding (first is default value). If left as default, the actual list is changed for given data size and dials." + }, + { + "prompt_type": "plain", + "instruction": ": What does binner_bin_list do? : binner bin list config.toml: List of max possible number of bins for numeric binning (first is default value). If left as default, the actual list is changed for given data size and dials. The binner will automatically reduce the number of bins based on predictive power." + }, + { + "prompt_type": "plain", + "instruction": ": Explain binner_bin_list. : binner bin list config.toml: List of max possible number of bins for numeric binning (first is default value). If left as default, the actual list is changed for given data size and dials. The binner will automatically reduce the number of bins based on predictive power." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner_bin_list", + "output": "binner bin list config.toml: List of max possible number of bins for numeric binning (first is default value). If left as default, the actual list is changed for given data size and dials. The binner will automatically reduce the number of bins based on predictive power." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner_bin_list", + "output": "binner bin list config.toml: List of max possible number of bins for numeric binning (first is default value). If left as default, the actual list is changed for given data size and dials. The binner will automatically reduce the number of bins based on predictive power." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "binner bin list", + "output": "binner bin list config.toml: List of max possible number of bins for numeric binning (first is default value). If left as default, the actual list is changed for given data size and dials. The binner will automatically reduce the number of bins based on predictive power." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "binner bin list config.toml: List of max possible number of bins for numeric binning (first is default value). If left as default, the actual list is changed for given data size and dials. The binner will automatically reduce the number of bins based on predictive power." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting binner_bin_list", + "output": "binner bin list config.toml: List of max possible number of bins for numeric binning (first is default value). If left as default, the actual list is changed for given data size and dials. The binner will automatically reduce the number of bins based on predictive power." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting binner_bin_list", + "output": "binner bin list config.toml: List of max possible number of bins for numeric binning (first is default value). 
If left as default, the actual list is changed for given data size and dials. The binner will automatically reduce the number of bins based on predictive power." + }, + { + "prompt_type": "plain", + "instruction": ": What does drop_redundant_columns_limit do? : drop redundant columns limit config.toml: If dataset has more columns, then will check only first such columns. Set to 0 to disable." + }, + { + "prompt_type": "plain", + "instruction": ": Explain drop_redundant_columns_limit. : drop redundant columns limit config.toml: If dataset has more columns, then will check only first such columns. Set to 0 to disable." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max number of columns to check for redundancy in training dataset.: . : Set the drop redundant columns limit config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_redundant_columns_limit", + "output": "drop redundant columns limit config.toml: If dataset has more columns, then will check only first such columns. Set to 0 to disable." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_redundant_columns_limit", + "output": "drop redundant columns limit config.toml: Max number of columns to check for redundancy in training dataset.: If dataset has more columns, then will check only first such columns. Set to 0 to disable." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop redundant columns limit", + "output": "drop redundant columns limit config.toml: Max number of columns to check for redundancy in training dataset.: If dataset has more columns, then will check only first such columns. Set to 0 to disable." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max number of columns to check for redundancy in training dataset.: ", + "output": "drop redundant columns limit config.toml: Max number of columns to check for redundancy in training dataset.: If dataset has more columns, then will check only first such columns. Set to 0 to disable." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting drop_redundant_columns_limit", + "output": "drop redundant columns limit config.toml: If dataset has more columns, then will check only first such columns. Set to 0 to disable." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting drop_redundant_columns_limit", + "output": "drop redundant columns limit config.toml: Max number of columns to check for redundancy in training dataset.: If dataset has more columns, then will check only first such columns. Set to 0 to disable." + }, + { + "prompt_type": "plain", + "instruction": ": What does drop_constant_columns do? : drop constant columns config.toml: Whether to drop columns with constant values" + }, + { + "prompt_type": "plain", + "instruction": ": Explain drop_constant_columns. : drop constant columns config.toml: Whether to drop columns with constant values" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Drop constant columns: . 
: Set the drop constant columns config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_constant_columns", + "output": "drop constant columns config.toml: Whether to drop columns with constant values" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_constant_columns", + "output": "drop constant columns config.toml: Drop constant columns: Whether to drop columns with constant values" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop constant columns", + "output": "drop constant columns config.toml: Drop constant columns: Whether to drop columns with constant values" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Drop constant columns: ", + "output": "drop constant columns config.toml: Drop constant columns: Whether to drop columns with constant values" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting drop_constant_columns", + "output": "drop constant columns config.toml: Whether to drop columns with constant values" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting drop_constant_columns", + "output": "drop constant columns config.toml: Drop constant columns: Whether to drop columns with constant values" + }, + { + "prompt_type": "plain", + "instruction": ": What does detect_duplicate_rows do? : detect duplicate rows config.toml: Whether to detect duplicate rows in training, validation and testing datasets. Done after doing type detection and dropping of redundant or missing columns across datasets, just before the experiment starts, still before leakage detection. 
Any further dropping of columns can change the amount of duplicate rows. Informative only, if want to drop rows in training data, make sure to check the drop_duplicate_rows setting. Uses a sample size, given by detect_duplicate_rows_max_rows_x_cols." + }, + { + "prompt_type": "plain", + "instruction": ": Explain detect_duplicate_rows. : detect duplicate rows config.toml: Whether to detect duplicate rows in training, validation and testing datasets. Done after doing type detection and dropping of redundant or missing columns across datasets, just before the experiment starts, still before leakage detection. Any further dropping of columns can change the amount of duplicate rows. Informative only, if want to drop rows in training data, make sure to check the drop_duplicate_rows setting. Uses a sample size, given by detect_duplicate_rows_max_rows_x_cols." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Detect duplicate rows: . : Set the detect duplicate rows config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect_duplicate_rows", + "output": "detect duplicate rows config.toml: Whether to detect duplicate rows in training, validation and testing datasets. Done after doing type detection and dropping of redundant or missing columns across datasets, just before the experiment starts, still before leakage detection. Any further dropping of columns can change the amount of duplicate rows. Informative only, if want to drop rows in training data, make sure to check the drop_duplicate_rows setting. Uses a sample size, given by detect_duplicate_rows_max_rows_x_cols." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect_duplicate_rows", + "output": "detect duplicate rows config.toml: Detect duplicate rows: Whether to detect duplicate rows in training, validation and testing datasets. 
Done after doing type detection and dropping of redundant or missing columns across datasets, just before the experiment starts, still before leakage detection. Any further dropping of columns can change the amount of duplicate rows. Informative only, if want to drop rows in training data, make sure to check the drop_duplicate_rows setting. Uses a sample size, given by detect_duplicate_rows_max_rows_x_cols." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect duplicate rows", + "output": "detect duplicate rows config.toml: Detect duplicate rows: Whether to detect duplicate rows in training, validation and testing datasets. Done after doing type detection and dropping of redundant or missing columns across datasets, just before the experiment starts, still before leakage detection. Any further dropping of columns can change the amount of duplicate rows. Informative only, if want to drop rows in training data, make sure to check the drop_duplicate_rows setting. Uses a sample size, given by detect_duplicate_rows_max_rows_x_cols." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Detect duplicate rows: ", + "output": "detect duplicate rows config.toml: Detect duplicate rows: Whether to detect duplicate rows in training, validation and testing datasets. Done after doing type detection and dropping of redundant or missing columns across datasets, just before the experiment starts, still before leakage detection. Any further dropping of columns can change the amount of duplicate rows. Informative only, if want to drop rows in training data, make sure to check the drop_duplicate_rows setting. Uses a sample size, given by detect_duplicate_rows_max_rows_x_cols." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting detect_duplicate_rows", + "output": "detect duplicate rows config.toml: Whether to detect duplicate rows in training, validation and testing datasets. Done after doing type detection and dropping of redundant or missing columns across datasets, just before the experiment starts, still before leakage detection. Any further dropping of columns can change the amount of duplicate rows. Informative only, if want to drop rows in training data, make sure to check the drop_duplicate_rows setting. Uses a sample size, given by detect_duplicate_rows_max_rows_x_cols." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting detect_duplicate_rows", + "output": "detect duplicate rows config.toml: Detect duplicate rows: Whether to detect duplicate rows in training, validation and testing datasets. Done after doing type detection and dropping of redundant or missing columns across datasets, just before the experiment starts, still before leakage detection. Any further dropping of columns can change the amount of duplicate rows. Informative only, if want to drop rows in training data, make sure to check the drop_duplicate_rows setting. Uses a sample size, given by detect_duplicate_rows_max_rows_x_cols." + }, + { + "prompt_type": "plain", + "instruction": ": What does drop_duplicate_rows_timeout do? : drop duplicate rows timeout config.toml: Timeout in seconds for dropping duplicate rows in training data, propportionally increases as rows*cols grows as compared to detect_duplicate_rows_max_rows_x_cols.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain drop_duplicate_rows_timeout. 
: drop duplicate rows timeout config.toml: Timeout in seconds for dropping duplicate rows in training data, proportionally increases as rows*cols grows as compared to detect_duplicate_rows_max_rows_x_cols.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_duplicate_rows_timeout", + "output": "drop duplicate rows timeout config.toml: Timeout in seconds for dropping duplicate rows in training data, proportionally increases as rows*cols grows as compared to detect_duplicate_rows_max_rows_x_cols.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_duplicate_rows_timeout", + "output": "drop duplicate rows timeout config.toml: Timeout in seconds for dropping duplicate rows in training data, proportionally increases as rows*cols grows as compared to detect_duplicate_rows_max_rows_x_cols.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop duplicate rows timeout", + "output": "drop duplicate rows timeout config.toml: Timeout in seconds for dropping duplicate rows in training data, proportionally increases as rows*cols grows as compared to detect_duplicate_rows_max_rows_x_cols.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Timeout in seconds for dropping duplicate rows in training data, proportionally increases as rows*cols grows as compared to detect_duplicate_rows_max_rows_x_cols.: ", + "output": "drop duplicate rows timeout config.toml: Timeout in seconds for dropping duplicate rows in training data, proportionally increases as rows*cols grows as compared to detect_duplicate_rows_max_rows_x_cols.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting drop_duplicate_rows_timeout", + 
"output": "drop duplicate rows timeout config.toml: Timeout in seconds for dropping duplicate rows in training data, propportionally increases as rows*cols grows as compared to detect_duplicate_rows_max_rows_x_cols.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting drop_duplicate_rows_timeout", + "output": "drop duplicate rows timeout config.toml: Timeout in seconds for dropping duplicate rows in training data, propportionally increases as rows*cols grows as compared to detect_duplicate_rows_max_rows_x_cols.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does drop_duplicate_rows do? : drop duplicate rows config.toml: Whether to drop duplicate rows in training data. Done at the start of Driverless AI, only considering columns to drop as given by the user, not considering validation or training datasets or leakage or redundant columns. Any further dropping of columns can change the amount of duplicate rows. Time limited by drop_duplicate_rows_timeout seconds. 'auto': \"off\"\" 'weight': If duplicates, then convert dropped duplicates into a weight column for training. Useful when duplicates are added to preserve some distribution of instances expected. Only allowed if no weight columnn is present, else duplicates are just dropped. 'drop': Drop any duplicates, keeping only first instances. 'off': Do not drop any duplicates. This may lead to over-estimation of accuracy." + }, + { + "prompt_type": "plain", + "instruction": ": Explain drop_duplicate_rows. : drop duplicate rows config.toml: Whether to drop duplicate rows in training data. Done at the start of Driverless AI, only considering columns to drop as given by the user, not considering validation or training datasets or leakage or redundant columns. Any further dropping of columns can change the amount of duplicate rows. Time limited by drop_duplicate_rows_timeout seconds. 
'auto': \"off\"\" 'weight': If duplicates, then convert dropped duplicates into a weight column for training. Useful when duplicates are added to preserve some distribution of instances expected. Only allowed if no weight columnn is present, else duplicates are just dropped. 'drop': Drop any duplicates, keeping only first instances. 'off': Do not drop any duplicates. This may lead to over-estimation of accuracy." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Drop duplicate rows in training data: . : Set the drop duplicate rows config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_duplicate_rows", + "output": "drop duplicate rows config.toml: Whether to drop duplicate rows in training data. Done at the start of Driverless AI, only considering columns to drop as given by the user, not considering validation or training datasets or leakage or redundant columns. Any further dropping of columns can change the amount of duplicate rows. Time limited by drop_duplicate_rows_timeout seconds. 'auto': \"off\"\" 'weight': If duplicates, then convert dropped duplicates into a weight column for training. Useful when duplicates are added to preserve some distribution of instances expected. Only allowed if no weight columnn is present, else duplicates are just dropped. 'drop': Drop any duplicates, keeping only first instances. 'off': Do not drop any duplicates. This may lead to over-estimation of accuracy." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_duplicate_rows", + "output": "drop duplicate rows config.toml: Drop duplicate rows in training data: Whether to drop duplicate rows in training data. Done at the start of Driverless AI, only considering columns to drop as given by the user, not considering validation or training datasets or leakage or redundant columns. 
Any further dropping of columns can change the amount of duplicate rows. Time limited by drop_duplicate_rows_timeout seconds. 'auto': \"off\"\" 'weight': If duplicates, then convert dropped duplicates into a weight column for training. Useful when duplicates are added to preserve some distribution of instances expected. Only allowed if no weight columnn is present, else duplicates are just dropped. 'drop': Drop any duplicates, keeping only first instances. 'off': Do not drop any duplicates. This may lead to over-estimation of accuracy." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop duplicate rows", + "output": "drop duplicate rows config.toml: Drop duplicate rows in training data: Whether to drop duplicate rows in training data. Done at the start of Driverless AI, only considering columns to drop as given by the user, not considering validation or training datasets or leakage or redundant columns. Any further dropping of columns can change the amount of duplicate rows. Time limited by drop_duplicate_rows_timeout seconds. 'auto': \"off\"\" 'weight': If duplicates, then convert dropped duplicates into a weight column for training. Useful when duplicates are added to preserve some distribution of instances expected. Only allowed if no weight columnn is present, else duplicates are just dropped. 'drop': Drop any duplicates, keeping only first instances. 'off': Do not drop any duplicates. This may lead to over-estimation of accuracy." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Drop duplicate rows in training data: ", + "output": "drop duplicate rows config.toml: Drop duplicate rows in training data: Whether to drop duplicate rows in training data. 
Done at the start of Driverless AI, only considering columns to drop as given by the user, not considering validation or training datasets or leakage or redundant columns. Any further dropping of columns can change the amount of duplicate rows. Time limited by drop_duplicate_rows_timeout seconds. 'auto': \"off\"\" 'weight': If duplicates, then convert dropped duplicates into a weight column for training. Useful when duplicates are added to preserve some distribution of instances expected. Only allowed if no weight columnn is present, else duplicates are just dropped. 'drop': Drop any duplicates, keeping only first instances. 'off': Do not drop any duplicates. This may lead to over-estimation of accuracy." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting drop_duplicate_rows", + "output": "drop duplicate rows config.toml: Whether to drop duplicate rows in training data. Done at the start of Driverless AI, only considering columns to drop as given by the user, not considering validation or training datasets or leakage or redundant columns. Any further dropping of columns can change the amount of duplicate rows. Time limited by drop_duplicate_rows_timeout seconds. 'auto': \"off\"\" 'weight': If duplicates, then convert dropped duplicates into a weight column for training. Useful when duplicates are added to preserve some distribution of instances expected. Only allowed if no weight columnn is present, else duplicates are just dropped. 'drop': Drop any duplicates, keeping only first instances. 'off': Do not drop any duplicates. This may lead to over-estimation of accuracy." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting drop_duplicate_rows", + "output": "drop duplicate rows config.toml: Drop duplicate rows in training data: Whether to drop duplicate rows in training data. 
Done at the start of Driverless AI, only considering columns to drop as given by the user, not considering validation or training datasets or leakage or redundant columns. Any further dropping of columns can change the amount of duplicate rows. Time limited by drop_duplicate_rows_timeout seconds. 'auto': \"off\"\" 'weight': If duplicates, then convert dropped duplicates into a weight column for training. Useful when duplicates are added to preserve some distribution of instances expected. Only allowed if no weight column is present, else duplicates are just dropped. 'drop': Drop any duplicates, keeping only first instances. 'off': Do not drop any duplicates. This may lead to over-estimation of accuracy." + }, + { + "prompt_type": "plain", + "instruction": ": What does detect_duplicate_rows_max_rows_x_cols do? : detect duplicate rows max rows x cols config.toml: If > 0, then acts as sampling size for informative duplicate row detection. If set to 0, will do checks for all dataset sizes." + }, + { + "prompt_type": "plain", + "instruction": ": Explain detect_duplicate_rows_max_rows_x_cols. : detect duplicate rows max rows x cols config.toml: If > 0, then acts as sampling size for informative duplicate row detection. If set to 0, will do checks for all dataset sizes." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Limit of dataset size in rows x cols for data when detecting duplicate rows: . : Set the detect duplicate rows max rows x cols config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect_duplicate_rows_max_rows_x_cols", + "output": "detect duplicate rows max rows x cols config.toml: If > 0, then acts as sampling size for informative duplicate row detection. If set to 0, will do checks for all dataset sizes." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect_duplicate_rows_max_rows_x_cols", + "output": "detect duplicate rows max rows x cols config.toml: Limit of dataset size in rows x cols for data when detecting duplicate rows: If > 0, then acts as sampling size for informative duplicate row detection. If set to 0, will do checks for all dataset sizes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect duplicate rows max rows x cols", + "output": "detect duplicate rows max rows x cols config.toml: Limit of dataset size in rows x cols for data when detecting duplicate rows: If > 0, then acts as sampling size for informative duplicate row detection. If set to 0, will do checks for all dataset sizes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Limit of dataset size in rows x cols for data when detecting duplicate rows: ", + "output": "detect duplicate rows max rows x cols config.toml: Limit of dataset size in rows x cols for data when detecting duplicate rows: If > 0, then acts as sampling size for informative duplicate row detection. If set to 0, will do checks for all dataset sizes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting detect_duplicate_rows_max_rows_x_cols", + "output": "detect duplicate rows max rows x cols config.toml: If > 0, then acts as sampling size for informative duplicate row detection. If set to 0, will do checks for all dataset sizes." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting detect_duplicate_rows_max_rows_x_cols", + "output": "detect duplicate rows max rows x cols config.toml: Limit of dataset size in rows x cols for data when detecting duplicate rows: If > 0, then acts as sampling size for informative duplicate row detection. If set to 0, will do checks for all dataset sizes." + }, + { + "prompt_type": "plain", + "instruction": ": What does drop_id_columns do? : drop id columns config.toml: Whether to drop columns that appear to be an ID" + }, + { + "prompt_type": "plain", + "instruction": ": Explain drop_id_columns. : drop id columns config.toml: Whether to drop columns that appear to be an ID" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Drop ID columns: . : Set the drop id columns config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_id_columns", + "output": "drop id columns config.toml: Whether to drop columns that appear to be an ID" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_id_columns", + "output": "drop id columns config.toml: Drop ID columns: Whether to drop columns that appear to be an ID" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop id columns", + "output": "drop id columns config.toml: Drop ID columns: Whether to drop columns that appear to be an ID" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Drop ID columns: ", + "output": "drop id columns config.toml: Drop ID columns: Whether to drop columns that appear to be an ID" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting drop_id_columns", + 
"output": "drop id columns config.toml: Whether to drop columns that appear to be an ID" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting drop_id_columns", + "output": "drop id columns config.toml: Drop ID columns: Whether to drop columns that appear to be an ID" + }, + { + "prompt_type": "plain", + "instruction": ": What does no_drop_features do? : no drop features config.toml: Whether to avoid dropping any columns (original or derived)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain no_drop_features. : no drop features config.toml: Whether to avoid dropping any columns (original or derived)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Don't drop any columns: . : Set the no drop features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "no_drop_features", + "output": "no drop features config.toml: Whether to avoid dropping any columns (original or derived)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "no_drop_features", + "output": "no drop features config.toml: Don't drop any columns: Whether to avoid dropping any columns (original or derived)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "no drop features", + "output": "no drop features config.toml: Don't drop any columns: Whether to avoid dropping any columns (original or derived)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Don't drop any columns: ", + "output": "no drop features config.toml: Don't drop any columns: Whether to avoid dropping any columns (original or derived)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert 
setting no_drop_features", + "output": "no drop features config.toml: Whether to avoid dropping any columns (original or derived)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting no_drop_features", + "output": "no drop features config.toml: Don't drop any columns: Whether to avoid dropping any columns (original or derived)" + }, + { + "prompt_type": "plain", + "instruction": ": What does cols_to_drop do? : cols to drop config.toml: Direct control over columns to drop in bulk so can copy-paste large lists instead of selecting each one separately in GUI" + }, + { + "prompt_type": "plain", + "instruction": ": Explain cols_to_drop. : cols to drop config.toml: Direct control over columns to drop in bulk so can copy-paste large lists instead of selecting each one separately in GUI" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Features to drop, e.g. [\"V1\", \"V2\", \"V3\"]: . : Set the cols to drop config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cols_to_drop", + "output": "cols to drop config.toml: Direct control over columns to drop in bulk so can copy-paste large lists instead of selecting each one separately in GUI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cols_to_drop", + "output": "cols to drop config.toml: Features to drop, e.g. [\"V1\", \"V2\", \"V3\"]: Direct control over columns to drop in bulk so can copy-paste large lists instead of selecting each one separately in GUI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cols to drop", + "output": "cols to drop config.toml: Features to drop, e.g. 
[\"V1\", \"V2\", \"V3\"]: Direct control over columns to drop in bulk so can copy-paste large lists instead of selecting each one separately in GUI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Features to drop, e.g. [\"V1\", \"V2\", \"V3\"]: ", + "output": "cols to drop config.toml: Features to drop, e.g. [\"V1\", \"V2\", \"V3\"]: Direct control over columns to drop in bulk so can copy-paste large lists instead of selecting each one separately in GUI" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting cols_to_drop", + "output": "cols to drop config.toml: Direct control over columns to drop in bulk so can copy-paste large lists instead of selecting each one separately in GUI" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting cols_to_drop", + "output": "cols to drop config.toml: Features to drop, e.g. [\"V1\", \"V2\", \"V3\"]: Direct control over columns to drop in bulk so can copy-paste large lists instead of selecting each one separately in GUI" + }, + { + "prompt_type": "plain", + "instruction": ": What does cols_to_group_by do? : cols to group by config.toml: Control over columns to group by for CVCatNumEncode Transformer, default is empty list that means DAI automatically searches all columns, selected randomly or by which have top variable importance. The CVCatNumEncode Transformer takes a list of categoricals (or these cols_to_group_by) and uses those columns as new feature to perform aggregations on (agg_funcs_for_group_by)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain cols_to_group_by. 
: cols to group by config.toml: Control over columns to group by for CVCatNumEncode Transformer, default is empty list that means DAI automatically searches all columns,selected randomly or by which have top variable importance.The CVCatNumEncode Transformer takes a list of categoricals (or these cols_to_group_by) and uses those columnsas new feature to perform aggregations on (agg_funcs_for_group_by)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Features to group by, e.g. [\"G1\", \"G2\", \"G3\"]: . : Set the cols to group by config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cols_to_group_by", + "output": "cols to group by config.toml: Control over columns to group by for CVCatNumEncode Transformer, default is empty list that means DAI automatically searches all columns,selected randomly or by which have top variable importance.The CVCatNumEncode Transformer takes a list of categoricals (or these cols_to_group_by) and uses those columnsas new feature to perform aggregations on (agg_funcs_for_group_by)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cols_to_group_by", + "output": "cols to group by config.toml: Features to group by, e.g. [\"G1\", \"G2\", \"G3\"]: Control over columns to group by for CVCatNumEncode Transformer, default is empty list that means DAI automatically searches all columns,selected randomly or by which have top variable importance.The CVCatNumEncode Transformer takes a list of categoricals (or these cols_to_group_by) and uses those columnsas new feature to perform aggregations on (agg_funcs_for_group_by)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cols to group by", + "output": "cols to group by config.toml: Features to group by, e.g. 
[\"G1\", \"G2\", \"G3\"]: Control over columns to group by for CVCatNumEncode Transformer, default is empty list that means DAI automatically searches all columns,selected randomly or by which have top variable importance.The CVCatNumEncode Transformer takes a list of categoricals (or these cols_to_group_by) and uses those columnsas new feature to perform aggregations on (agg_funcs_for_group_by)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Features to group by, e.g. [\"G1\", \"G2\", \"G3\"]: ", + "output": "cols to group by config.toml: Features to group by, e.g. [\"G1\", \"G2\", \"G3\"]: Control over columns to group by for CVCatNumEncode Transformer, default is empty list that means DAI automatically searches all columns,selected randomly or by which have top variable importance.The CVCatNumEncode Transformer takes a list of categoricals (or these cols_to_group_by) and uses those columnsas new feature to perform aggregations on (agg_funcs_for_group_by)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting cols_to_group_by", + "output": "cols to group by config.toml: Control over columns to group by for CVCatNumEncode Transformer, default is empty list that means DAI automatically searches all columns,selected randomly or by which have top variable importance.The CVCatNumEncode Transformer takes a list of categoricals (or these cols_to_group_by) and uses those columnsas new feature to perform aggregations on (agg_funcs_for_group_by)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting cols_to_group_by", + "output": "cols to group by config.toml: Features to group by, e.g. 
[\"G1\", \"G2\", \"G3\"]: Control over columns to group by for CVCatNumEncode Transformer, default is empty list that means DAI automatically searches all columns,selected randomly or by which have top variable importance.The CVCatNumEncode Transformer takes a list of categoricals (or these cols_to_group_by) and uses those columnsas new feature to perform aggregations on (agg_funcs_for_group_by)." + }, + { + "prompt_type": "plain", + "instruction": ": What does sample_cols_to_group_by do? : sample cols to group by config.toml: Whether to sample from given features to group by (True) or to always group by all features (False) when using cols_to_group_by." + }, + { + "prompt_type": "plain", + "instruction": ": Explain sample_cols_to_group_by. : sample cols to group by config.toml: Whether to sample from given features to group by (True) or to always group by all features (False) when using cols_to_group_by." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Sample from features to group by: . : Set the sample cols to group by config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "sample_cols_to_group_by", + "output": "sample cols to group by config.toml: Whether to sample from given features to group by (True) or to always group by all features (False) when using cols_to_group_by." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "sample_cols_to_group_by", + "output": "sample cols to group by config.toml: Sample from features to group by: Whether to sample from given features to group by (True) or to always group by all features (False) when using cols_to_group_by." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "sample cols to group by", + "output": "sample cols to group by config.toml: Sample from features to group by: Whether to sample from given features to group by (True) or to always group by all features (False) when using cols_to_group_by." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Sample from features to group by: ", + "output": "sample cols to group by config.toml: Sample from features to group by: Whether to sample from given features to group by (True) or to always group by all features (False) when using cols_to_group_by." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting sample_cols_to_group_by", + "output": "sample cols to group by config.toml: Whether to sample from given features to group by (True) or to always group by all features (False) when using cols_to_group_by." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting sample_cols_to_group_by", + "output": "sample cols to group by config.toml: Sample from features to group by: Whether to sample from given features to group by (True) or to always group by all features (False) when using cols_to_group_by." + }, + { + "prompt_type": "plain", + "instruction": ": What does agg_funcs_for_group_by do? : agg funcs for group by config.toml: Aggregation functions to use for groupby operations for CVCatNumEncode Transformer, see also cols_to_group_by and sample_cols_to_group_by." + }, + { + "prompt_type": "plain", + "instruction": ": Explain agg_funcs_for_group_by. : agg funcs for group by config.toml: Aggregation functions to use for groupby operations for CVCatNumEncode Transformer, see also cols_to_group_by and sample_cols_to_group_by." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Aggregation functions (non-time-series) for group by operations: . : Set the agg funcs for group by config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "agg_funcs_for_group_by", + "output": "agg funcs for group by config.toml: Aggregation functions to use for groupby operations for CVCatNumEncode Transformer, see also cols_to_group_by and sample_cols_to_group_by." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "agg_funcs_for_group_by", + "output": "agg funcs for group by config.toml: Aggregation functions (non-time-series) for group by operations: Aggregation functions to use for groupby operations for CVCatNumEncode Transformer, see also cols_to_group_by and sample_cols_to_group_by." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "agg funcs for group by", + "output": "agg funcs for group by config.toml: Aggregation functions (non-time-series) for group by operations: Aggregation functions to use for groupby operations for CVCatNumEncode Transformer, see also cols_to_group_by and sample_cols_to_group_by." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Aggregation functions (non-time-series) for group by operations: ", + "output": "agg funcs for group by config.toml: Aggregation functions (non-time-series) for group by operations: Aggregation functions to use for groupby operations for CVCatNumEncode Transformer, see also cols_to_group_by and sample_cols_to_group_by." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting agg_funcs_for_group_by", + "output": "agg funcs for group by config.toml: Aggregation functions to use for groupby operations for CVCatNumEncode Transformer, see also cols_to_group_by and sample_cols_to_group_by." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting agg_funcs_for_group_by", + "output": "agg funcs for group by config.toml: Aggregation functions (non-time-series) for group by operations: Aggregation functions to use for groupby operations for CVCatNumEncode Transformer, see also cols_to_group_by and sample_cols_to_group_by." + }, + { + "prompt_type": "plain", + "instruction": ": What does folds_for_group_by do? : folds for group by config.toml: Out of fold aggregations ensure less overfitting, but see less data in each fold. For controlling how many folds used by CVCatNumEncode Transformer." + }, + { + "prompt_type": "plain", + "instruction": ": Explain folds_for_group_by. : folds for group by config.toml: Out of fold aggregations ensure less overfitting, but see less data in each fold. For controlling how many folds used by CVCatNumEncode Transformer." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of folds to obtain aggregation when grouping: . : Set the folds for group by config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "folds_for_group_by", + "output": "folds for group by config.toml: Out of fold aggregations ensure less overfitting, but see less data in each fold. For controlling how many folds used by CVCatNumEncode Transformer." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "folds_for_group_by", + "output": "folds for group by config.toml: Number of folds to obtain aggregation when grouping: Out of fold aggregations ensure less overfitting, but see less data in each fold. For controlling how many folds used by CVCatNumEncode Transformer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "folds for group by", + "output": "folds for group by config.toml: Number of folds to obtain aggregation when grouping: Out of fold aggregations ensure less overfitting, but see less data in each fold. For controlling how many folds used by CVCatNumEncode Transformer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of folds to obtain aggregation when grouping: ", + "output": "folds for group by config.toml: Number of folds to obtain aggregation when grouping: Out of fold aggregations ensure less overfitting, but see less data in each fold. For controlling how many folds used by CVCatNumEncode Transformer." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting folds_for_group_by", + "output": "folds for group by config.toml: Out of fold aggregations ensure less overfitting, but see less data in each fold. For controlling how many folds used by CVCatNumEncode Transformer." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting folds_for_group_by", + "output": "folds for group by config.toml: Number of folds to obtain aggregation when grouping: Out of fold aggregations ensure less overfitting, but see less data in each fold. For controlling how many folds used by CVCatNumEncode Transformer." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does cols_to_force_in do? : cols to force in config.toml: Control over columns to force-in. Forced-in features are handled by the most interpretable transformer allowed by experiment options, and they are never removed (although model may assign 0 importance to them still).Transformers used by default include:OriginalTransformer for numeric,CatOriginalTransformer or FrequencyTransformer for categorical,TextOriginalTransformer for text,DateTimeOriginalTransformer for date-times,DateOriginalTransformer for dates,ImageOriginalTransformer or ImageVectorizerTransformer for images,etc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain cols_to_force_in. : cols to force in config.toml: Control over columns to force-in. Forced-in features are handled by the most interpretable transformer allowed by experiment options, and they are never removed (although model may assign 0 importance to them still).Transformers used by default include:OriginalTransformer for numeric,CatOriginalTransformer or FrequencyTransformer for categorical,TextOriginalTransformer for text,DateTimeOriginalTransformer for date-times,DateOriginalTransformer for dates,ImageOriginalTransformer or ImageVectorizerTransformer for images,etc." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Features to force in, e.g. [\"G1\", \"G2\", \"G3\"]: . : Set the cols to force in config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cols_to_force_in", + "output": "cols to force in config.toml: Control over columns to force-in. 
Forced-in features are handled by the most interpretable transformer allowed by experiment options, and they are never removed (although model may assign 0 importance to them still).Transformers used by default include:OriginalTransformer for numeric,CatOriginalTransformer or FrequencyTransformer for categorical,TextOriginalTransformer for text,DateTimeOriginalTransformer for date-times,DateOriginalTransformer for dates,ImageOriginalTransformer or ImageVectorizerTransformer for images,etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cols_to_force_in", + "output": "cols to force in config.toml: Features to force in, e.g. [\"G1\", \"G2\", \"G3\"]: Control over columns to force-in. Forced-in features are handled by the most interpretable transformer allowed by experiment options, and they are never removed (although model may assign 0 importance to them still).Transformers used by default include:OriginalTransformer for numeric,CatOriginalTransformer or FrequencyTransformer for categorical,TextOriginalTransformer for text,DateTimeOriginalTransformer for date-times,DateOriginalTransformer for dates,ImageOriginalTransformer or ImageVectorizerTransformer for images,etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "cols to force in", + "output": "cols to force in config.toml: Features to force in, e.g. [\"G1\", \"G2\", \"G3\"]: Control over columns to force-in. 
Forced-in features are handled by the most interpretable transformer allowed by experiment options, and they are never removed (although model may assign 0 importance to them still).Transformers used by default include:OriginalTransformer for numeric,CatOriginalTransformer or FrequencyTransformer for categorical,TextOriginalTransformer for text,DateTimeOriginalTransformer for date-times,DateOriginalTransformer for dates,ImageOriginalTransformer or ImageVectorizerTransformer for images,etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Features to force in, e.g. [\"G1\", \"G2\", \"G3\"]: ", + "output": "cols to force in config.toml: Features to force in, e.g. [\"G1\", \"G2\", \"G3\"]: Control over columns to force-in. Forced-in features are handled by the most interpretable transformer allowed by experiment options, and they are never removed (although model may assign 0 importance to them still).Transformers used by default include:OriginalTransformer for numeric,CatOriginalTransformer or FrequencyTransformer for categorical,TextOriginalTransformer for text,DateTimeOriginalTransformer for date-times,DateOriginalTransformer for dates,ImageOriginalTransformer or ImageVectorizerTransformer for images,etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting cols_to_force_in", + "output": "cols to force in config.toml: Control over columns to force-in. 
Forced-in features are handled by the most interpretable transformer allowed by experiment options, and they are never removed (although model may assign 0 importance to them still).Transformers used by default include:OriginalTransformer for numeric,CatOriginalTransformer or FrequencyTransformer for categorical,TextOriginalTransformer for text,DateTimeOriginalTransformer for date-times,DateOriginalTransformer for dates,ImageOriginalTransformer or ImageVectorizerTransformer for images,etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting cols_to_force_in", + "output": "cols to force in config.toml: Features to force in, e.g. [\"G1\", \"G2\", \"G3\"]: Control over columns to force-in. Forced-in features are handled by the most interpretable transformer allowed by experiment options, and they are never removed (although model may assign 0 importance to them still).Transformers used by default include:OriginalTransformer for numeric,CatOriginalTransformer or FrequencyTransformer for categorical,TextOriginalTransformer for text,DateTimeOriginalTransformer for date-times,DateOriginalTransformer for dates,ImageOriginalTransformer or ImageVectorizerTransformer for images,etc." + }, + { + "prompt_type": "plain", + "instruction": ": What does mutation_mode do? : mutation mode config.toml: Strategy to apply when doing mutations on transformers. Sample mode is default, with tendency to sample transformer parameters. Batched mode tends to do multiple types of the same transformation together. Full mode does even more types of the same transformation together. 
Full mode does even more types of the same transformation together. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Type of mutation strategy: . : Set the mutation mode config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mutation_mode", + "output": "mutation mode config.toml: Strategy to apply when doing mutations on transformers. Sample mode is default, with tendency to sample transformer parameters. Batched mode tends to do multiple types of the same transformation together. Full mode does even more types of the same transformation together. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mutation_mode", + "output": "mutation mode config.toml: Type of mutation strategy: Strategy to apply when doing mutations on transformers. Sample mode is default, with tendency to sample transformer parameters. Batched mode tends to do multiple types of the same transformation together. Full mode does even more types of the same transformation together. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mutation mode", + "output": "mutation mode config.toml: Type of mutation strategy: Strategy to apply when doing mutations on transformers. Sample mode is default, with tendency to sample transformer parameters. Batched mode tends to do multiple types of the same transformation together. Full mode does even more types of the same transformation together. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Type of mutation strategy: ", + "output": "mutation mode config.toml: Type of mutation strategy: Strategy to apply when doing mutations on transformers. Sample mode is default, with tendency to sample transformer parameters. 
Batched mode tends to do multiple types of the same transformation together. Full mode does even more types of the same transformation together. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mutation_mode", + "output": "mutation mode config.toml: Strategy to apply when doing mutations on transformers. Sample mode is default, with tendency to sample transformer parameters. Batched mode tends to do multiple types of the same transformation together. Full mode does even more types of the same transformation together. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mutation_mode", + "output": "mutation mode config.toml: Type of mutation strategy: Strategy to apply when doing mutations on transformers. Sample mode is default, with tendency to sample transformer parameters. Batched mode tends to do multiple types of the same transformation together. Full mode does even more types of the same transformation together. " + }, + { + "prompt_type": "plain", + "instruction": ": What does leaderboard_mode do? : leaderboard mode config.toml: 'baseline': Explore exemplar set of models with baselines as reference.'random': Explore 10 random seeds for same setup. Useful since nature of genetic algorithm is noisy and repeats might get better results, or one can ensemble the custom individuals from such repeats.'line': Explore good model with all features and original features with all models. Useful as first exploration.'line_all': Like 'line', but enable all models and transformers possible instead of only what base experiment setup would have inferred.'product': Explore one-by-one Cartesian product of each model and transformer. Useful for exhaustive exploration." + }, + { + "prompt_type": "plain", + "instruction": ": Explain leaderboard_mode. 
: leaderboard mode config.toml: 'baseline': Explore exemplar set of models with baselines as reference.'random': Explore 10 random seeds for same setup. Useful since nature of genetic algorithm is noisy and repeats might get better results, or one can ensemble the custom individuals from such repeats.'line': Explore good model with all features and original features with all models. Useful as first exploration.'line_all': Like 'line', but enable all models and transformers possible instead of only what base experiment setup would have inferred.'product': Explore one-by-one Cartesian product of each model and transformer. Useful for exhaustive exploration." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Control the automatic leaderboard mode: . : Set the leaderboard mode config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leaderboard_mode", + "output": "leaderboard mode config.toml: 'baseline': Explore exemplar set of models with baselines as reference.'random': Explore 10 random seeds for same setup. Useful since nature of genetic algorithm is noisy and repeats might get better results, or one can ensemble the custom individuals from such repeats.'line': Explore good model with all features and original features with all models. Useful as first exploration.'line_all': Like 'line', but enable all models and transformers possible instead of only what base experiment setup would have inferred.'product': Explore one-by-one Cartesian product of each model and transformer. Useful for exhaustive exploration." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leaderboard_mode", + "output": "leaderboard mode config.toml: Control the automatic leaderboard mode: 'baseline': Explore exemplar set of models with baselines as reference.'random': Explore 10 random seeds for same setup. 
Useful since nature of genetic algorithm is noisy and repeats might get better results, or one can ensemble the custom individuals from such repeats.'line': Explore good model with all features and original features with all models. Useful as first exploration.'line_all': Like 'line', but enable all models and transformers possible instead of only what base experiment setup would have inferred.'product': Explore one-by-one Cartesian product of each model and transformer. Useful for exhaustive exploration." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leaderboard mode", + "output": "leaderboard mode config.toml: Control the automatic leaderboard mode: 'baseline': Explore exemplar set of models with baselines as reference.'random': Explore 10 random seeds for same setup. Useful since nature of genetic algorithm is noisy and repeats might get better results, or one can ensemble the custom individuals from such repeats.'line': Explore good model with all features and original features with all models. Useful as first exploration.'line_all': Like 'line', but enable all models and transformers possible instead of only what base experiment setup would have inferred.'product': Explore one-by-one Cartesian product of each model and transformer. Useful for exhaustive exploration." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Control the automatic leaderboard mode: ", + "output": "leaderboard mode config.toml: Control the automatic leaderboard mode: 'baseline': Explore exemplar set of models with baselines as reference.'random': Explore 10 random seeds for same setup. Useful since nature of genetic algorithm is noisy and repeats might get better results, or one can ensemble the custom individuals from such repeats.'line': Explore good model with all features and original features with all models. 
Useful as first exploration.'line_all': Like 'line', but enable all models and transformers possible instead of only what base experiment setup would have inferred.'product': Explore one-by-one Cartesian product of each model and transformer. Useful for exhaustive exploration." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting leaderboard_mode", + "output": "leaderboard mode config.toml: 'baseline': Explore exemplar set of models with baselines as reference.'random': Explore 10 random seeds for same setup. Useful since nature of genetic algorithm is noisy and repeats might get better results, or one can ensemble the custom individuals from such repeats.'line': Explore good model with all features and original features with all models. Useful as first exploration.'line_all': Like 'line', but enable all models and transformers possible instead of only what base experiment setup would have inferred.'product': Explore one-by-one Cartesian product of each model and transformer. Useful for exhaustive exploration." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting leaderboard_mode", + "output": "leaderboard mode config.toml: Control the automatic leaderboard mode: 'baseline': Explore exemplar set of models with baselines as reference.'random': Explore 10 random seeds for same setup. Useful since nature of genetic algorithm is noisy and repeats might get better results, or one can ensemble the custom individuals from such repeats.'line': Explore good model with all features and original features with all models. Useful as first exploration.'line_all': Like 'line', but enable all models and transformers possible instead of only what base experiment setup would have inferred.'product': Explore one-by-one Cartesian product of each model and transformer. Useful for exhaustive exploration." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does default_knob_offset_accuracy do? : default knob offset accuracy config.toml: Allows control over default accuracy knob setting. If default models are too complex, set to -1 or -2, etc. If default models are not accurate enough, set to 1 or 2, etc. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain default_knob_offset_accuracy. : default knob offset accuracy config.toml: Allows control over default accuracy knob setting. If default models are too complex, set to -1 or -2, etc. If default models are not accurate enough, set to 1 or 2, etc. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Offset for default accuracy knob: . : Set the default knob offset accuracy config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default_knob_offset_accuracy", + "output": "default knob offset accuracy config.toml: Allows control over default accuracy knob setting. If default models are too complex, set to -1 or -2, etc. If default models are not accurate enough, set to 1 or 2, etc. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default_knob_offset_accuracy", + "output": "default knob offset accuracy config.toml: Offset for default accuracy knob: Allows control over default accuracy knob setting. If default models are too complex, set to -1 or -2, etc. If default models are not accurate enough, set to 1 or 2, etc. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default knob offset accuracy", + "output": "default knob offset accuracy config.toml: Offset for default accuracy knob: Allows control over default accuracy knob setting. If default models are too complex, set to -1 or -2, etc. 
If default models are not accurate enough, set to 1 or 2, etc. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Offset for default accuracy knob: ", + "output": "default knob offset accuracy config.toml: Offset for default accuracy knob: Allows control over default accuracy knob setting. If default models are too complex, set to -1 or -2, etc. If default models are not accurate enough, set to 1 or 2, etc. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting default_knob_offset_accuracy", + "output": "default knob offset accuracy config.toml: Allows control over default accuracy knob setting. If default models are too complex, set to -1 or -2, etc. If default models are not accurate enough, set to 1 or 2, etc. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting default_knob_offset_accuracy", + "output": "default knob offset accuracy config.toml: Offset for default accuracy knob: Allows control over default accuracy knob setting. If default models are too complex, set to -1 or -2, etc. If default models are not accurate enough, set to 1 or 2, etc. " + }, + { + "prompt_type": "plain", + "instruction": ": What does default_knob_offset_time do? : default knob offset time config.toml: Allows control over default time knob setting. If default experiments are too slow, set to -1 or -2, etc. If default experiments finish too fast, set to 1 or 2, etc. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain default_knob_offset_time. : default knob offset time config.toml: Allows control over default time knob setting. If default experiments are too slow, set to -1 or -2, etc. If default experiments finish too fast, set to 1 or 2, etc. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Offset for default time knob: . 
: Set the default knob offset time config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default_knob_offset_time", + "output": "default knob offset time config.toml: Allows control over default time knob setting. If default experiments are too slow, set to -1 or -2, etc. If default experiments finish too fast, set to 1 or 2, etc. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default_knob_offset_time", + "output": "default knob offset time config.toml: Offset for default time knob: Allows control over default time knob setting. If default experiments are too slow, set to -1 or -2, etc. If default experiments finish too fast, set to 1 or 2, etc. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default knob offset time", + "output": "default knob offset time config.toml: Offset for default time knob: Allows control over default time knob setting. If default experiments are too slow, set to -1 or -2, etc. If default experiments finish too fast, set to 1 or 2, etc. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Offset for default time knob: ", + "output": "default knob offset time config.toml: Offset for default time knob: Allows control over default time knob setting. If default experiments are too slow, set to -1 or -2, etc. If default experiments finish too fast, set to 1 or 2, etc. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting default_knob_offset_time", + "output": "default knob offset time config.toml: Allows control over default time knob setting. If default experiments are too slow, set to -1 or -2, etc. If default experiments finish too fast, set to 1 or 2, etc. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting default_knob_offset_time", + "output": "default knob offset time config.toml: Offset for default time knob: Allows control over default time knob setting. If default experiments are too slow, set to -1 or -2, etc. If default experiments finish too fast, set to 1 or 2, etc. " + }, + { + "prompt_type": "plain", + "instruction": ": What does default_knob_offset_interpretability do? : default knob offset interpretability config.toml: Allows control over default interpretability knob setting. If default models are too simple, set to -1 or -2, etc. If default models are too complex, set to 1 or 2, etc. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain default_knob_offset_interpretability. : default knob offset interpretability config.toml: Allows control over default interpretability knob setting. If default models are too simple, set to -1 or -2, etc. If default models are too complex, set to 1 or 2, etc. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Offset for default interpretability knob: . : Set the default knob offset interpretability config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default_knob_offset_interpretability", + "output": "default knob offset interpretability config.toml: Allows control over default interpretability knob setting. If default models are too simple, set to -1 or -2, etc. If default models are too complex, set to 1 or 2, etc. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default_knob_offset_interpretability", + "output": "default knob offset interpretability config.toml: Offset for default interpretability knob: Allows control over default interpretability knob setting. 
If default models are too simple, set to -1 or -2, etc. If default models are too complex, set to 1 or 2, etc. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "default knob offset interpretability", + "output": "default knob offset interpretability config.toml: Offset for default interpretability knob: Allows control over default interpretability knob setting. If default models are too simple, set to -1 or -2, etc. If default models are too complex, set to 1 or 2, etc. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Offset for default interpretability knob: ", + "output": "default knob offset interpretability config.toml: Offset for default interpretability knob: Allows control over default interpretability knob setting. If default models are too simple, set to -1 or -2, etc. If default models are too complex, set to 1 or 2, etc. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting default_knob_offset_interpretability", + "output": "default knob offset interpretability config.toml: Allows control over default interpretability knob setting. If default models are too simple, set to -1 or -2, etc. If default models are too complex, set to 1 or 2, etc. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting default_knob_offset_interpretability", + "output": "default knob offset interpretability config.toml: Offset for default interpretability knob: Allows control over default interpretability knob setting. If default models are too simple, set to -1 or -2, etc. If default models are too complex, set to 1 or 2, etc. " + }, + { + "prompt_type": "plain", + "instruction": ": What does shift_check_text do? 
: shift check text config.toml: Whether to enable checking text for shift, currently only via label encoding." + }, + { + "prompt_type": "plain", + "instruction": ": Explain shift_check_text. : shift check text config.toml: Whether to enable checking text for shift, currently only via label encoding." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_check_text", + "output": "shift check text config.toml: Whether to enable checking text for shift, currently only via label encoding." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_check_text", + "output": "shift check text config.toml: Whether to enable checking text for shift, currently only via label encoding." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift check text", + "output": "shift check text config.toml: Whether to enable checking text for shift, currently only via label encoding." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "shift check text config.toml: Whether to enable checking text for shift, currently only via label encoding." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting shift_check_text", + "output": "shift check text config.toml: Whether to enable checking text for shift, currently only via label encoding." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting shift_check_text", + "output": "shift check text config.toml: Whether to enable checking text for shift, currently only via label encoding." + }, + { + "prompt_type": "plain", + "instruction": ": What does use_rf_for_shift_if_have_lgbm do? 
: use rf for shift if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for shift detection." + }, + { + "prompt_type": "plain", + "instruction": ": Explain use_rf_for_shift_if_have_lgbm. : use rf for shift if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for shift detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_rf_for_shift_if_have_lgbm", + "output": "use rf for shift if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for shift detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_rf_for_shift_if_have_lgbm", + "output": "use rf for shift if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for shift detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use rf for shift if have lgbm", + "output": "use rf for shift if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for shift detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "use rf for shift if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for shift detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting use_rf_for_shift_if_have_lgbm", + "output": "use rf for shift if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for shift detection." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting use_rf_for_shift_if_have_lgbm", + "output": "use rf for shift if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for shift detection." + }, + { + "prompt_type": "plain", + "instruction": ": What does shift_key_features_varimp do? : shift key features varimp config.toml: Normalized training variable importance above which to check the feature for shift. Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "plain", + "instruction": ": Explain shift_key_features_varimp. : shift key features varimp config.toml: Normalized training variable importance above which to check the feature for shift. Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_key_features_varimp", + "output": "shift key features varimp config.toml: Normalized training variable importance above which to check the feature for shift. Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_key_features_varimp", + "output": "shift key features varimp config.toml: Normalized training variable importance above which to check the feature for shift. Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift key features varimp", + "output": "shift key features varimp config.toml: Normalized training variable importance above which to check the feature for shift. Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": 
"shift key features varimp config.toml: Normalized training variable importance above which to check the feature for shift. Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting shift_key_features_varimp", + "output": "shift key features varimp config.toml: Normalized training variable importance above which to check the feature for shift. Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting shift_key_features_varimp", + "output": "shift key features varimp config.toml: Normalized training variable importance above which to check the feature for shift. Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "plain", + "instruction": ": What does shift_check_reduced_features do? : shift check reduced features config.toml: Whether to only check certain features based upon the value of shift_key_features_varimp" + }, + { + "prompt_type": "plain", + "instruction": ": Explain shift_check_reduced_features. 
: shift check reduced features config.toml: Whether to only check certain features based upon the value of shift_key_features_varimp" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_check_reduced_features", + "output": "shift check reduced features config.toml: Whether to only check certain features based upon the value of shift_key_features_varimp" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_check_reduced_features", + "output": "shift check reduced features config.toml: Whether to only check certain features based upon the value of shift_key_features_varimp" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift check reduced features", + "output": "shift check reduced features config.toml: Whether to only check certain features based upon the value of shift_key_features_varimp" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "shift check reduced features config.toml: Whether to only check certain features based upon the value of shift_key_features_varimp" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting shift_check_reduced_features", + "output": "shift check reduced features config.toml: Whether to only check certain features based upon the value of shift_key_features_varimp" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting shift_check_reduced_features", + "output": "shift check reduced features config.toml: Whether to only check certain features based upon the value of shift_key_features_varimp" + }, + { + "prompt_type": "plain", + "instruction": ": What does shift_trees do? 
: shift trees config.toml: Number of trees to use to train model to check shift in distribution. No larger than max_nestimators" + }, + { + "prompt_type": "plain", + "instruction": ": Explain shift_trees. : shift trees config.toml: Number of trees to use to train model to check shift in distribution. No larger than max_nestimators" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_trees", + "output": "shift trees config.toml: Number of trees to use to train model to check shift in distribution. No larger than max_nestimators" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_trees", + "output": "shift trees config.toml: Number of trees to use to train model to check shift in distribution. No larger than max_nestimators" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift trees", + "output": "shift trees config.toml: Number of trees to use to train model to check shift in distribution. No larger than max_nestimators" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "shift trees config.toml: Number of trees to use to train model to check shift in distribution. No larger than max_nestimators" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting shift_trees", + "output": "shift trees config.toml: Number of trees to use to train model to check shift in distribution. No larger than max_nestimators" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting shift_trees", + "output": "shift trees config.toml: Number of trees to use to train model to check shift in distribution. No larger than max_nestimators" + }, + { + "prompt_type": "plain", + 
"instruction": ": What does shift_max_bin do? : shift max bin config.toml: The value of max_bin to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "plain", + "instruction": ": Explain shift_max_bin. : shift max bin config.toml: The value of max_bin to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_max_bin", + "output": "shift max bin config.toml: The value of max_bin to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_max_bin", + "output": "shift max bin config.toml: The value of max_bin to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift max bin", + "output": "shift max bin config.toml: The value of max_bin to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "shift max bin config.toml: The value of max_bin to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting shift_max_bin", + "output": "shift max bin config.toml: The value of max_bin to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting shift_max_bin", + "output": "shift max bin config.toml: The value of max_bin to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "plain", + 
"instruction": ": What does shift_min_max_depth do? : shift min max depth config.toml: The min. value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "plain", + "instruction": ": Explain shift_min_max_depth. : shift min max depth config.toml: The min. value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_min_max_depth", + "output": "shift min max depth config.toml: The min. value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_min_max_depth", + "output": "shift min max depth config.toml: The min. value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift min max depth", + "output": "shift min max depth config.toml: The min. value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "shift min max depth config.toml: The min. value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting shift_min_max_depth", + "output": "shift min max depth config.toml: The min. 
value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting shift_min_max_depth", + "output": "shift min max depth config.toml: The min. value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "plain", + "instruction": ": What does shift_max_max_depth do? : shift max max depth config.toml: The max. value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "plain", + "instruction": ": Explain shift_max_max_depth. : shift max max depth config.toml: The max. value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_max_max_depth", + "output": "shift max max depth config.toml: The max. value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_max_max_depth", + "output": "shift max max depth config.toml: The max. value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift max max depth", + "output": "shift max max depth config.toml: The max. value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "shift max max depth config.toml: The max. 
value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting shift_max_max_depth", + "output": "shift max max depth config.toml: The max. value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting shift_max_max_depth", + "output": "shift max max depth config.toml: The max. value of max_depth to use for trees to use to train model to check shift in distribution" + }, + { + "prompt_type": "plain", + "instruction": ": What does detect_features_distribution_shift_threshold_auc do? : detect features distribution shift threshold auc config.toml: If distribution shift detection is enabled, show features for which shift AUC is above this value (AUC of a binary classifier that predicts whether given feature value belongs to train or test data)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain detect_features_distribution_shift_threshold_auc. 
: detect features distribution shift threshold auc config.toml: If distribution shift detection is enabled, show features for which shift AUC is above this value (AUC of a binary classifier that predicts whether given feature value belongs to train or test data)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect_features_distribution_shift_threshold_auc", + "output": "detect features distribution shift threshold auc config.toml: If distribution shift detection is enabled, show features for which shift AUC is above this value (AUC of a binary classifier that predicts whether given feature value belongs to train or test data)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect_features_distribution_shift_threshold_auc", + "output": "detect features distribution shift threshold auc config.toml: If distribution shift detection is enabled, show features for which shift AUC is above this value (AUC of a binary classifier that predicts whether given feature value belongs to train or test data)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect features distribution shift threshold auc", + "output": "detect features distribution shift threshold auc config.toml: If distribution shift detection is enabled, show features for which shift AUC is above this value (AUC of a binary classifier that predicts whether given feature value belongs to train or test data)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "detect features distribution shift threshold auc config.toml: If distribution shift detection is enabled, show features for which shift AUC is above this value (AUC of a binary classifier that predicts whether given feature value belongs 
to train or test data)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting detect_features_distribution_shift_threshold_auc", + "output": "detect features distribution shift threshold auc config.toml: If distribution shift detection is enabled, show features for which shift AUC is above this value (AUC of a binary classifier that predicts whether given feature value belongs to train or test data)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting detect_features_distribution_shift_threshold_auc", + "output": "detect features distribution shift threshold auc config.toml: If distribution shift detection is enabled, show features for which shift AUC is above this value (AUC of a binary classifier that predicts whether given feature value belongs to train or test data)" + }, + { + "prompt_type": "plain", + "instruction": ": What does drop_features_distribution_shift_min_features do? : drop features distribution shift min features config.toml: Minimum number of features to keep, keeping least shifted feature at least if 1" + }, + { + "prompt_type": "plain", + "instruction": ": Explain drop_features_distribution_shift_min_features. 
: drop features distribution shift min features config.toml: Minimum number of features to keep, keeping least shifted feature at least if 1" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_features_distribution_shift_min_features", + "output": "drop features distribution shift min features config.toml: Minimum number of features to keep, keeping least shifted feature at least if 1" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_features_distribution_shift_min_features", + "output": "drop features distribution shift min features config.toml: Minimum number of features to keep, keeping least shifted feature at least if 1" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop features distribution shift min features", + "output": "drop features distribution shift min features config.toml: Minimum number of features to keep, keeping least shifted feature at least if 1" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "drop features distribution shift min features config.toml: Minimum number of features to keep, keeping least shifted feature at least if 1" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting drop_features_distribution_shift_min_features", + "output": "drop features distribution shift min features config.toml: Minimum number of features to keep, keeping least shifted feature at least if 1" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting drop_features_distribution_shift_min_features", + "output": "drop features distribution shift min features config.toml: Minimum number of features to keep, keeping least 
shifted feature at least if 1" + }, + { + "prompt_type": "plain", + "instruction": ": What does shift_high_notification_level do? : shift high notification level config.toml: Shift beyond which shows HIGH notification, else MEDIUM" + }, + { + "prompt_type": "plain", + "instruction": ": Explain shift_high_notification_level. : shift high notification level config.toml: Shift beyond which shows HIGH notification, else MEDIUM" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_high_notification_level", + "output": "shift high notification level config.toml: Shift beyond which shows HIGH notification, else MEDIUM" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift_high_notification_level", + "output": "shift high notification level config.toml: Shift beyond which shows HIGH notification, else MEDIUM" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "shift high notification level", + "output": "shift high notification level config.toml: Shift beyond which shows HIGH notification, else MEDIUM" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "shift high notification level config.toml: Shift beyond which shows HIGH notification, else MEDIUM" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting shift_high_notification_level", + "output": "shift high notification level config.toml: Shift beyond which shows HIGH notification, else MEDIUM" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting shift_high_notification_level", + "output": "shift high notification level config.toml: Shift beyond which shows HIGH notification, else MEDIUM" + }, + { 
+ "prompt_type": "plain", + "instruction": ": What does leakage_check_text do? : leakage check text config.toml: Whether to enable checking text for leakage, currently only via label encoding." + }, + { + "prompt_type": "plain", + "instruction": ": Explain leakage_check_text. : leakage check text config.toml: Whether to enable checking text for leakage, currently only via label encoding." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_check_text", + "output": "leakage check text config.toml: Whether to enable checking text for leakage, currently only via label encoding." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_check_text", + "output": "leakage check text config.toml: Whether to enable checking text for leakage, currently only via label encoding." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage check text", + "output": "leakage check text config.toml: Whether to enable checking text for leakage, currently only via label encoding." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "leakage check text config.toml: Whether to enable checking text for leakage, currently only via label encoding." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting leakage_check_text", + "output": "leakage check text config.toml: Whether to enable checking text for leakage, currently only via label encoding." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting leakage_check_text", + "output": "leakage check text config.toml: Whether to enable checking text for leakage, currently only via label encoding." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does leakage_key_features_varimp do? : leakage key features varimp config.toml: Normalized training variable importance (per 1 minus AUC/R2 to control for leaky varimp dominance) above which to check the feature for leakage Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "plain", + "instruction": ": Explain leakage_key_features_varimp. : leakage key features varimp config.toml: Normalized training variable importance (per 1 minus AUC/R2 to control for leaky varimp dominance) above which to check the feature for leakage Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_key_features_varimp", + "output": "leakage key features varimp config.toml: Normalized training variable importance (per 1 minus AUC/R2 to control for leaky varimp dominance) above which to check the feature for leakage Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_key_features_varimp", + "output": "leakage key features varimp config.toml: Normalized training variable importance (per 1 minus AUC/R2 to control for leaky varimp dominance) above which to check the feature for leakage Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage key features varimp", + "output": "leakage key features varimp config.toml: Normalized training variable importance (per 1 minus AUC/R2 to control for leaky varimp dominance) above which to check the feature for leakage Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for 
Driverless AI", + "input": "", + "output": "leakage key features varimp config.toml: Normalized training variable importance (per 1 minus AUC/R2 to control for leaky varimp dominance) above which to check the feature for leakage Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting leakage_key_features_varimp", + "output": "leakage key features varimp config.toml: Normalized training variable importance (per 1 minus AUC/R2 to control for leaky varimp dominance) above which to check the feature for leakage Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting leakage_key_features_varimp", + "output": "leakage key features varimp config.toml: Normalized training variable importance (per 1 minus AUC/R2 to control for leaky varimp dominance) above which to check the feature for leakage Useful to avoid checking likely unimportant features" + }, + { + "prompt_type": "plain", + "instruction": ": What does leakage_key_features_varimp_if_no_early_stopping do? : leakage key features varimp if no early stopping config.toml: Like leakage_key_features_varimp, but applies if early stopping disabled when can trust multiple leaks to get uniform varimp." + }, + { + "prompt_type": "plain", + "instruction": ": Explain leakage_key_features_varimp_if_no_early_stopping. : leakage key features varimp if no early stopping config.toml: Like leakage_key_features_varimp, but applies if early stopping disabled when can trust multiple leaks to get uniform varimp." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_key_features_varimp_if_no_early_stopping", + "output": "leakage key features varimp if no early stopping config.toml: Like leakage_key_features_varimp, but applies if early stopping disabled when can trust multiple leaks to get uniform varimp." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_key_features_varimp_if_no_early_stopping", + "output": "leakage key features varimp if no early stopping config.toml: Like leakage_key_features_varimp, but applies if early stopping disabled when can trust multiple leaks to get uniform varimp." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage key features varimp if no early stopping", + "output": "leakage key features varimp if no early stopping config.toml: Like leakage_key_features_varimp, but applies if early stopping disabled when can trust multiple leaks to get uniform varimp." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "leakage key features varimp if no early stopping config.toml: Like leakage_key_features_varimp, but applies if early stopping disabled when can trust multiple leaks to get uniform varimp." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting leakage_key_features_varimp_if_no_early_stopping", + "output": "leakage key features varimp if no early stopping config.toml: Like leakage_key_features_varimp, but applies if early stopping disabled when can trust multiple leaks to get uniform varimp." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting leakage_key_features_varimp_if_no_early_stopping", + "output": "leakage key features varimp if no early stopping config.toml: Like leakage_key_features_varimp, but applies if early stopping disabled when can trust multiple leaks to get uniform varimp." + }, + { + "prompt_type": "plain", + "instruction": ": What does leakage_check_reduced_features do? : leakage check reduced features config.toml: Whether to only check certain features based upon the value of leakage_key_features_varimp. If any feature has AUC near 1, will consume all variable importance, even if another feature is also leaky. So False is safest option, but True generally good if many columns." + }, + { + "prompt_type": "plain", + "instruction": ": Explain leakage_check_reduced_features. : leakage check reduced features config.toml: Whether to only check certain features based upon the value of leakage_key_features_varimp. If any feature has AUC near 1, will consume all variable importance, even if another feature is also leaky. So False is safest option, but True generally good if many columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_check_reduced_features", + "output": "leakage check reduced features config.toml: Whether to only check certain features based upon the value of leakage_key_features_varimp. If any feature has AUC near 1, will consume all variable importance, even if another feature is also leaky. So False is safest option, but True generally good if many columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_check_reduced_features", + "output": "leakage check reduced features config.toml: Whether to only check certain features based upon the value of leakage_key_features_varimp. 
If any feature has AUC near 1, will consume all variable importance, even if another feature is also leaky. So False is safest option, but True generally good if many columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage check reduced features", + "output": "leakage check reduced features config.toml: Whether to only check certain features based upon the value of leakage_key_features_varimp. If any feature has AUC near 1, will consume all variable importance, even if another feature is also leaky. So False is safest option, but True generally good if many columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "leakage check reduced features config.toml: Whether to only check certain features based upon the value of leakage_key_features_varimp. If any feature has AUC near 1, will consume all variable importance, even if another feature is also leaky. So False is safest option, but True generally good if many columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting leakage_check_reduced_features", + "output": "leakage check reduced features config.toml: Whether to only check certain features based upon the value of leakage_key_features_varimp. If any feature has AUC near 1, will consume all variable importance, even if another feature is also leaky. So False is safest option, but True generally good if many columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting leakage_check_reduced_features", + "output": "leakage check reduced features config.toml: Whether to only check certain features based upon the value of leakage_key_features_varimp. If any feature has AUC near 1, will consume all variable importance, even if another feature is also leaky. 
So False is safest option, but True generally good if many columns." + }, + { + "prompt_type": "plain", + "instruction": ": What does use_rf_for_leakage_if_have_lgbm do? : use rf for leakage if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for leakage detection." + }, + { + "prompt_type": "plain", + "instruction": ": Explain use_rf_for_leakage_if_have_lgbm. : use rf for leakage if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for leakage detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_rf_for_leakage_if_have_lgbm", + "output": "use rf for leakage if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for leakage detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_rf_for_leakage_if_have_lgbm", + "output": "use rf for leakage if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for leakage detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use rf for leakage if have lgbm", + "output": "use rf for leakage if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for leakage detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "use rf for leakage if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for leakage detection." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting use_rf_for_leakage_if_have_lgbm", + "output": "use rf for leakage if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for leakage detection." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting use_rf_for_leakage_if_have_lgbm", + "output": "use rf for leakage if have lgbm config.toml: Whether to use LightGBM random forest mode without early stopping for leakage detection." + }, + { + "prompt_type": "plain", + "instruction": ": What does leakage_trees do? : leakage trees config.toml: Number of trees to use to train model to check for leakage No larger than max_nestimators" + }, + { + "prompt_type": "plain", + "instruction": ": Explain leakage_trees. : leakage trees config.toml: Number of trees to use to train model to check for leakage No larger than max_nestimators" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_trees", + "output": "leakage trees config.toml: Number of trees to use to train model to check for leakage No larger than max_nestimators" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_trees", + "output": "leakage trees config.toml: Number of trees to use to train model to check for leakage No larger than max_nestimators" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage trees", + "output": "leakage trees config.toml: Number of trees to use to train model to check for leakage No larger than max_nestimators" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "leakage trees config.toml: Number of trees to use to 
train model to check for leakage No larger than max_nestimators" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting leakage_trees", + "output": "leakage trees config.toml: Number of trees to use to train model to check for leakage No larger than max_nestimators" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting leakage_trees", + "output": "leakage trees config.toml: Number of trees to use to train model to check for leakage No larger than max_nestimators" + }, + { + "prompt_type": "plain", + "instruction": ": What does leakage_max_bin do? : leakage max bin config.toml: The value of max_bin to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "plain", + "instruction": ": Explain leakage_max_bin. : leakage max bin config.toml: The value of max_bin to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_max_bin", + "output": "leakage max bin config.toml: The value of max_bin to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_max_bin", + "output": "leakage max bin config.toml: The value of max_bin to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage max bin", + "output": "leakage max bin config.toml: The value of max_bin to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "leakage max bin config.toml: The value of max_bin to use for trees to use to train 
model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting leakage_max_bin", + "output": "leakage max bin config.toml: The value of max_bin to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting leakage_max_bin", + "output": "leakage max bin config.toml: The value of max_bin to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "plain", + "instruction": ": What does leakage_min_max_depth do? : leakage min max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "plain", + "instruction": ": Explain leakage_min_max_depth. : leakage min max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_min_max_depth", + "output": "leakage min max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_min_max_depth", + "output": "leakage min max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage min max depth", + "output": "leakage min max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "leakage min max depth config.toml: The value of max_depth to 
use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting leakage_min_max_depth", + "output": "leakage min max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting leakage_min_max_depth", + "output": "leakage min max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "plain", + "instruction": ": What does leakage_max_max_depth do? : leakage max max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "plain", + "instruction": ": Explain leakage_max_max_depth. : leakage max max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_max_max_depth", + "output": "leakage max max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_max_max_depth", + "output": "leakage max max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage max max depth", + "output": "leakage max max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": 
"leakage max max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting leakage_max_max_depth", + "output": "leakage max max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting leakage_max_max_depth", + "output": "leakage max max depth config.toml: The value of max_depth to use for trees to use to train model to check for leakage" + }, + { + "prompt_type": "plain", + "instruction": ": What does detect_features_leakage_threshold_auc do? : detect features leakage threshold auc config.toml: When leakage detection is enabled, if AUC (R2 for regression) on original data (label-encoded)is above or equal to this value, then trigger per-feature leakage detection " + }, + { + "prompt_type": "plain", + "instruction": ": Explain detect_features_leakage_threshold_auc. : detect features leakage threshold auc config.toml: When leakage detection is enabled, if AUC (R2 for regression) on original data (label-encoded)is above or equal to this value, then trigger per-feature leakage detection " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Leakage feature detection AUC/R2 threshold: . 
: Set the detect features leakage threshold auc config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect_features_leakage_threshold_auc", + "output": "detect features leakage threshold auc config.toml: When leakage detection is enabled, if AUC (R2 for regression) on original data (label-encoded)is above or equal to this value, then trigger per-feature leakage detection " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect_features_leakage_threshold_auc", + "output": "detect features leakage threshold auc config.toml: Leakage feature detection AUC/R2 threshold: When leakage detection is enabled, if AUC (R2 for regression) on original data (label-encoded)is above or equal to this value, then trigger per-feature leakage detection " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect features leakage threshold auc", + "output": "detect features leakage threshold auc config.toml: Leakage feature detection AUC/R2 threshold: When leakage detection is enabled, if AUC (R2 for regression) on original data (label-encoded)is above or equal to this value, then trigger per-feature leakage detection " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Leakage feature detection AUC/R2 threshold: ", + "output": "detect features leakage threshold auc config.toml: Leakage feature detection AUC/R2 threshold: When leakage detection is enabled, if AUC (R2 for regression) on original data (label-encoded)is above or equal to this value, then trigger per-feature leakage detection " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting detect_features_leakage_threshold_auc", + "output": "detect features leakage 
threshold auc config.toml: When leakage detection is enabled, if AUC (R2 for regression) on original data (label-encoded)is above or equal to this value, then trigger per-feature leakage detection " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting detect_features_leakage_threshold_auc", + "output": "detect features leakage threshold auc config.toml: Leakage feature detection AUC/R2 threshold: When leakage detection is enabled, if AUC (R2 for regression) on original data (label-encoded)is above or equal to this value, then trigger per-feature leakage detection " + }, + { + "prompt_type": "plain", + "instruction": ": What does detect_features_per_feature_leakage_threshold_auc do? : detect features per feature leakage threshold auc config.toml: When leakage detection is enabled, show features for which AUC (R2 for regression,for whether that predictor/feature alone predicts the target) is above or equal to this value.Feature is dropped if AUC/R2 is above or equal to drop_features_leakage_threshold_auc " + }, + { + "prompt_type": "plain", + "instruction": ": Explain detect_features_per_feature_leakage_threshold_auc. : detect features per feature leakage threshold auc config.toml: When leakage detection is enabled, show features for which AUC (R2 for regression,for whether that predictor/feature alone predicts the target) is above or equal to this value.Feature is dropped if AUC/R2 is above or equal to drop_features_leakage_threshold_auc " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Leakage features per feature detection AUC/R2 threshold: . 
: Set the detect features per feature leakage threshold auc config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect_features_per_feature_leakage_threshold_auc", + "output": "detect features per feature leakage threshold auc config.toml: When leakage detection is enabled, show features for which AUC (R2 for regression,for whether that predictor/feature alone predicts the target) is above or equal to this value.Feature is dropped if AUC/R2 is above or equal to drop_features_leakage_threshold_auc " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect_features_per_feature_leakage_threshold_auc", + "output": "detect features per feature leakage threshold auc config.toml: Leakage features per feature detection AUC/R2 threshold: When leakage detection is enabled, show features for which AUC (R2 for regression,for whether that predictor/feature alone predicts the target) is above or equal to this value.Feature is dropped if AUC/R2 is above or equal to drop_features_leakage_threshold_auc " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detect features per feature leakage threshold auc", + "output": "detect features per feature leakage threshold auc config.toml: Leakage features per feature detection AUC/R2 threshold: When leakage detection is enabled, show features for which AUC (R2 for regression,for whether that predictor/feature alone predicts the target) is above or equal to this value.Feature is dropped if AUC/R2 is above or equal to drop_features_leakage_threshold_auc " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Leakage features per feature detection AUC/R2 threshold: ", + "output": "detect features per feature leakage threshold auc 
config.toml: Leakage features per feature detection AUC/R2 threshold: When leakage detection is enabled, show features for which AUC (R2 for regression,for whether that predictor/feature alone predicts the target) is above or equal to this value.Feature is dropped if AUC/R2 is above or equal to drop_features_leakage_threshold_auc " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting detect_features_per_feature_leakage_threshold_auc", + "output": "detect features per feature leakage threshold auc config.toml: When leakage detection is enabled, show features for which AUC (R2 for regression,for whether that predictor/feature alone predicts the target) is above or equal to this value.Feature is dropped if AUC/R2 is above or equal to drop_features_leakage_threshold_auc " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting detect_features_per_feature_leakage_threshold_auc", + "output": "detect features per feature leakage threshold auc config.toml: Leakage features per feature detection AUC/R2 threshold: When leakage detection is enabled, show features for which AUC (R2 for regression,for whether that predictor/feature alone predicts the target) is above or equal to this value.Feature is dropped if AUC/R2 is above or equal to drop_features_leakage_threshold_auc " + }, + { + "prompt_type": "plain", + "instruction": ": What does drop_features_leakage_min_features do? : drop features leakage min features config.toml: Minimum number of features to keep, keeping least leakage feature at least if 1" + }, + { + "prompt_type": "plain", + "instruction": ": Explain drop_features_leakage_min_features. 
: drop features leakage min features config.toml: Minimum number of features to keep, keeping least leakage feature at least if 1" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_features_leakage_min_features", + "output": "drop features leakage min features config.toml: Minimum number of features to keep, keeping least leakage feature at least if 1" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop_features_leakage_min_features", + "output": "drop features leakage min features config.toml: Minimum number of features to keep, keeping least leakage feature at least if 1" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "drop features leakage min features", + "output": "drop features leakage min features config.toml: Minimum number of features to keep, keeping least leakage feature at least if 1" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "drop features leakage min features config.toml: Minimum number of features to keep, keeping least leakage feature at least if 1" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting drop_features_leakage_min_features", + "output": "drop features leakage min features config.toml: Minimum number of features to keep, keeping least leakage feature at least if 1" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting drop_features_leakage_min_features", + "output": "drop features leakage min features config.toml: Minimum number of features to keep, keeping least leakage feature at least if 1" + }, + { + "prompt_type": "plain", + "instruction": ": What does leakage_train_test_split do? 
: leakage train test split config.toml: Ratio of train to validation holdout when testing for leakage" + }, + { + "prompt_type": "plain", + "instruction": ": Explain leakage_train_test_split. : leakage train test split config.toml: Ratio of train to validation holdout when testing for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_train_test_split", + "output": "leakage train test split config.toml: Ratio of train to validation holdout when testing for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage_train_test_split", + "output": "leakage train test split config.toml: Ratio of train to validation holdout when testing for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "leakage train test split", + "output": "leakage train test split config.toml: Ratio of train to validation holdout when testing for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "leakage train test split config.toml: Ratio of train to validation holdout when testing for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting leakage_train_test_split", + "output": "leakage train test split config.toml: Ratio of train to validation holdout when testing for leakage" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting leakage_train_test_split", + "output": "leakage train test split config.toml: Ratio of train to validation holdout when testing for leakage" + }, + { + "prompt_type": "plain", + "instruction": ": What does detailed_traces do? 
: detailed traces config.toml: Whether to enable detailed traces (in GUI Trace)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain detailed_traces. : detailed traces config.toml: Whether to enable detailed traces (in GUI Trace)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable detailed traces: . : Set the detailed traces config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detailed_traces", + "output": "detailed traces config.toml: Whether to enable detailed traces (in GUI Trace)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detailed_traces", + "output": "detailed traces config.toml: Enable detailed traces: Whether to enable detailed traces (in GUI Trace)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detailed traces", + "output": "detailed traces config.toml: Enable detailed traces: Whether to enable detailed traces (in GUI Trace)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable detailed traces: ", + "output": "detailed traces config.toml: Enable detailed traces: Whether to enable detailed traces (in GUI Trace)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting detailed_traces", + "output": "detailed traces config.toml: Whether to enable detailed traces (in GUI Trace)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting detailed_traces", + "output": "detailed traces config.toml: Enable detailed traces: Whether to enable detailed traces (in GUI Trace)" + }, + { + "prompt_type": "plain", + "instruction": ": What does debug_log do? 
: debug log config.toml: Whether to enable debug log level (in log files)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain debug_log. : debug log config.toml: Whether to enable debug log level (in log files)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable debug log level: . : Set the debug log config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "debug_log", + "output": "debug log config.toml: Whether to enable debug log level (in log files)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "debug_log", + "output": "debug log config.toml: Enable debug log level: Whether to enable debug log level (in log files)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "debug log", + "output": "debug log config.toml: Enable debug log level: Whether to enable debug log level (in log files)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable debug log level: ", + "output": "debug log config.toml: Enable debug log level: Whether to enable debug log level (in log files)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting debug_log", + "output": "debug log config.toml: Whether to enable debug log level (in log files)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting debug_log", + "output": "debug log config.toml: Enable debug log level: Whether to enable debug log level (in log files)" + }, + { + "prompt_type": "plain", + "instruction": ": What does log_system_info_per_experiment do? 
: log system info per experiment config.toml: Whether to add logging of system information such as CPU, GPU, disk space at the start of each experiment log. Same information is already logged in system logs." + }, + { + "prompt_type": "plain", + "instruction": ": Explain log_system_info_per_experiment. : log system info per experiment config.toml: Whether to add logging of system information such as CPU, GPU, disk space at the start of each experiment log. Same information is already logged in system logs." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable logging of system information for each experiment: . : Set the log system info per experiment config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "log_system_info_per_experiment", + "output": "log system info per experiment config.toml: Whether to add logging of system information such as CPU, GPU, disk space at the start of each experiment log. Same information is already logged in system logs." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "log_system_info_per_experiment", + "output": "log system info per experiment config.toml: Enable logging of system information for each experiment: Whether to add logging of system information such as CPU, GPU, disk space at the start of each experiment log. Same information is already logged in system logs." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "log system info per experiment", + "output": "log system info per experiment config.toml: Enable logging of system information for each experiment: Whether to add logging of system information such as CPU, GPU, disk space at the start of each experiment log. Same information is already logged in system logs." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable logging of system information for each experiment: ", + "output": "log system info per experiment config.toml: Enable logging of system information for each experiment: Whether to add logging of system information such as CPU, GPU, disk space at the start of each experiment log. Same information is already logged in system logs." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting log_system_info_per_experiment", + "output": "log system info per experiment config.toml: Whether to add logging of system information such as CPU, GPU, disk space at the start of each experiment log. Same information is already logged in system logs." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting log_system_info_per_experiment", + "output": "log system info per experiment config.toml: Enable logging of system information for each experiment: Whether to add logging of system information such as CPU, GPU, disk space at the start of each experiment log. Same information is already logged in system logs." + }, + { + "prompt_type": "plain", + "instruction": ": What does check_system do? : check system config.toml: Whether to check system installation on server startup: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain check_system. 
: check system config.toml: Whether to check system installation on server startup: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_system", + "output": "check system config.toml: Whether to check system installation on server startup: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_system", + "output": "check system config.toml: Whether to check system installation on server startup: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check system", + "output": "check system config.toml: Whether to check system installation on server startup: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to check system installation on server startup: ", + "output": "check system config.toml: Whether to check system installation on server startup: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting check_system", + "output": "check system config.toml: Whether to check system installation on server startup: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting check_system", + "output": "check system config.toml: Whether to check system installation on server startup: " + }, + { + "prompt_type": "plain", + "instruction": ": What does check_system_basic do? : check system basic config.toml: Whether to report basic system information on server startup: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain check_system_basic. 
: check system basic config.toml: Whether to report basic system information on server startup: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_system_basic", + "output": "check system basic config.toml: Whether to report basic system information on server startup: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_system_basic", + "output": "check system basic config.toml: Whether to report basic system information on server startup: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check system basic", + "output": "check system basic config.toml: Whether to report basic system information on server startup: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to report basic system information on server startup: ", + "output": "check system basic config.toml: Whether to report basic system information on server startup: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting check_system_basic", + "output": "check system basic config.toml: Whether to report basic system information on server startup: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting check_system_basic", + "output": "check system basic config.toml: Whether to report basic system information on server startup: " + }, + { + "prompt_type": "plain", + "instruction": ": What does abs_tol_for_perfect_score do? : abs tol for perfect score config.toml: How close to the optimal value (usually 1 or 0) does the validation score need to be to be considered perfect (to stop the experiment)?" 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain abs_tol_for_perfect_score. : abs tol for perfect score config.toml: How close to the optimal value (usually 1 or 0) does the validation score need to be to be considered perfect (to stop the experiment)?" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "abs_tol_for_perfect_score", + "output": "abs tol for perfect score config.toml: How close to the optimal value (usually 1 or 0) does the validation score need to be to be considered perfect (to stop the experiment)?" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "abs_tol_for_perfect_score", + "output": "abs tol for perfect score config.toml: How close to the optimal value (usually 1 or 0) does the validation score need to be to be considered perfect (to stop the experiment)?" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "abs tol for perfect score", + "output": "abs tol for perfect score config.toml: How close to the optimal value (usually 1 or 0) does the validation score need to be to be considered perfect (to stop the experiment)?" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "abs tol for perfect score config.toml: How close to the optimal value (usually 1 or 0) does the validation score need to be to be considered perfect (to stop the experiment)?" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting abs_tol_for_perfect_score", + "output": "abs tol for perfect score config.toml: How close to the optimal value (usually 1 or 0) does the validation score need to be to be considered perfect (to stop the experiment)?" 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting abs_tol_for_perfect_score", + "output": "abs tol for perfect score config.toml: How close to the optimal value (usually 1 or 0) does the validation score need to be to be considered perfect (to stop the experiment)?" + }, + { + "prompt_type": "plain", + "instruction": ": What does data_ingest_timeout do? : data ingest timeout config.toml: Timeout in seconds to wait for data ingestion." + }, + { + "prompt_type": "plain", + "instruction": ": Explain data_ingest_timeout. : data ingest timeout config.toml: Timeout in seconds to wait for data ingestion." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_ingest_timeout", + "output": "data ingest timeout config.toml: Timeout in seconds to wait for data ingestion." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_ingest_timeout", + "output": "data ingest timeout config.toml: Timeout in seconds to wait for data ingestion." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data ingest timeout", + "output": "data ingest timeout config.toml: Timeout in seconds to wait for data ingestion." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "data ingest timeout config.toml: Timeout in seconds to wait for data ingestion." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting data_ingest_timeout", + "output": "data ingest timeout config.toml: Timeout in seconds to wait for data ingestion." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting data_ingest_timeout", + "output": "data ingest timeout config.toml: Timeout in seconds to wait for data ingestion." + }, + { + "prompt_type": "plain", + "instruction": ": What does mutate_timeout do? : mutate timeout config.toml: How many seconds to allow mutate to take, nominally only takes few seconds at most. But on busy system doing many individuals, might take longer. Optuna sometimes live lock hangs in scipy random distribution maker." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mutate_timeout. : mutate timeout config.toml: How many seconds to allow mutate to take, nominally only takes few seconds at most. But on busy system doing many individuals, might take longer. Optuna sometimes live lock hangs in scipy random distribution maker." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mutate_timeout", + "output": "mutate timeout config.toml: How many seconds to allow mutate to take, nominally only takes few seconds at most. But on busy system doing many individuals, might take longer. Optuna sometimes live lock hangs in scipy random distribution maker." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mutate_timeout", + "output": "mutate timeout config.toml: How many seconds to allow mutate to take, nominally only takes few seconds at most. But on busy system doing many individuals, might take longer. Optuna sometimes live lock hangs in scipy random distribution maker." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mutate timeout", + "output": "mutate timeout config.toml: How many seconds to allow mutate to take, nominally only takes few seconds at most. 
But on busy system doing many individuals, might take longer. Optuna sometimes live lock hangs in scipy random distribution maker." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mutate timeout config.toml: How many seconds to allow mutate to take, nominally only takes few seconds at most. But on busy system doing many individuals, might take longer. Optuna sometimes live lock hangs in scipy random distribution maker." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mutate_timeout", + "output": "mutate timeout config.toml: How many seconds to allow mutate to take, nominally only takes few seconds at most. But on busy system doing many individuals, might take longer. Optuna sometimes live lock hangs in scipy random distribution maker." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mutate_timeout", + "output": "mutate timeout config.toml: How many seconds to allow mutate to take, nominally only takes few seconds at most. But on busy system doing many individuals, might take longer. Optuna sometimes live lock hangs in scipy random distribution maker." + }, + { + "prompt_type": "plain", + "instruction": ": What does gpu_locking_trust_pool_submission do? : gpu locking trust pool submission config.toml: Whether to trust GPU locking for submission of GPU jobs to limit memory usage. If False, then wait for as GPU submissions to be less than number of GPUs, even if later jobs could be purely CPU jobs that did not need to wait. Only applicable if not restricting number of GPUs via num_gpus_per_experiment, else have to use resources instead of relying upon locking. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain gpu_locking_trust_pool_submission. 
: gpu locking trust pool submission config.toml: Whether to trust GPU locking for submission of GPU jobs to limit memory usage. If False, then wait for as GPU submissions to be less than number of GPUs, even if later jobs could be purely CPU jobs that did not need to wait. Only applicable if not restricting number of GPUs via num_gpus_per_experiment, else have to use resources instead of relying upon locking. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu_locking_trust_pool_submission", + "output": "gpu locking trust pool submission config.toml: Whether to trust GPU locking for submission of GPU jobs to limit memory usage. If False, then wait for as GPU submissions to be less than number of GPUs, even if later jobs could be purely CPU jobs that did not need to wait. Only applicable if not restricting number of GPUs via num_gpus_per_experiment, else have to use resources instead of relying upon locking. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu_locking_trust_pool_submission", + "output": "gpu locking trust pool submission config.toml: Whether to trust GPU locking for submission of GPU jobs to limit memory usage. If False, then wait for as GPU submissions to be less than number of GPUs, even if later jobs could be purely CPU jobs that did not need to wait. Only applicable if not restricting number of GPUs via num_gpus_per_experiment, else have to use resources instead of relying upon locking. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu locking trust pool submission", + "output": "gpu locking trust pool submission config.toml: Whether to trust GPU locking for submission of GPU jobs to limit memory usage. 
If False, then wait for as GPU submissions to be less than number of GPUs, even if later jobs could be purely CPU jobs that did not need to wait. Only applicable if not restricting number of GPUs via num_gpus_per_experiment, else have to use resources instead of relying upon locking. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "gpu locking trust pool submission config.toml: Whether to trust GPU locking for submission of GPU jobs to limit memory usage. If False, then wait for as GPU submissions to be less than number of GPUs, even if later jobs could be purely CPU jobs that did not need to wait. Only applicable if not restricting number of GPUs via num_gpus_per_experiment, else have to use resources instead of relying upon locking. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting gpu_locking_trust_pool_submission", + "output": "gpu locking trust pool submission config.toml: Whether to trust GPU locking for submission of GPU jobs to limit memory usage. If False, then wait for as GPU submissions to be less than number of GPUs, even if later jobs could be purely CPU jobs that did not need to wait. Only applicable if not restricting number of GPUs via num_gpus_per_experiment, else have to use resources instead of relying upon locking. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting gpu_locking_trust_pool_submission", + "output": "gpu locking trust pool submission config.toml: Whether to trust GPU locking for submission of GPU jobs to limit memory usage. If False, then wait for as GPU submissions to be less than number of GPUs, even if later jobs could be purely CPU jobs that did not need to wait. Only applicable if not restricting number of GPUs via num_gpus_per_experiment, else have to use resources instead of relying upon locking. 
" + }, + { + "prompt_type": "plain", + "instruction": ": What does gpu_locking_free_dead do? : gpu locking free dead config.toml: Whether to steal GPU locks when process is neither on GPU PID list nor using CPU resources at all (e.g. sleeping). Only steal from multi-GPU locks that are incomplete. Prevents deadlocks in case multi-GPU model hangs." + }, + { + "prompt_type": "plain", + "instruction": ": Explain gpu_locking_free_dead. : gpu locking free dead config.toml: Whether to steal GPU locks when process is neither on GPU PID list nor using CPU resources at all (e.g. sleeping). Only steal from multi-GPU locks that are incomplete. Prevents deadlocks in case multi-GPU model hangs." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu_locking_free_dead", + "output": "gpu locking free dead config.toml: Whether to steal GPU locks when process is neither on GPU PID list nor using CPU resources at all (e.g. sleeping). Only steal from multi-GPU locks that are incomplete. Prevents deadlocks in case multi-GPU model hangs." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu_locking_free_dead", + "output": "gpu locking free dead config.toml: Whether to steal GPU locks when process is neither on GPU PID list nor using CPU resources at all (e.g. sleeping). Only steal from multi-GPU locks that are incomplete. Prevents deadlocks in case multi-GPU model hangs." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu locking free dead", + "output": "gpu locking free dead config.toml: Whether to steal GPU locks when process is neither on GPU PID list nor using CPU resources at all (e.g. sleeping). Only steal from multi-GPU locks that are incomplete. Prevents deadlocks in case multi-GPU model hangs." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "gpu locking free dead config.toml: Whether to steal GPU locks when process is neither on GPU PID list nor using CPU resources at all (e.g. sleeping). Only steal from multi-GPU locks that are incomplete. Prevents deadlocks in case multi-GPU model hangs." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting gpu_locking_free_dead", + "output": "gpu locking free dead config.toml: Whether to steal GPU locks when process is neither on GPU PID list nor using CPU resources at all (e.g. sleeping). Only steal from multi-GPU locks that are incomplete. Prevents deadlocks in case multi-GPU model hangs." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting gpu_locking_free_dead", + "output": "gpu locking free dead config.toml: Whether to steal GPU locks when process is neither on GPU PID list nor using CPU resources at all (e.g. sleeping). Only steal from multi-GPU locks that are incomplete. Prevents deadlocks in case multi-GPU model hangs." + }, + { + "prompt_type": "plain", + "instruction": ": What does log_predict_info do? : log predict info config.toml: Whether to show detailed predict information in logs.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain log_predict_info. 
: log predict info config.toml: Whether to show detailed predict information in logs.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "log_predict_info", + "output": "log predict info config.toml: Whether to show detailed predict information in logs.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "log_predict_info", + "output": "log predict info config.toml: Whether to show detailed predict information in logs.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "log predict info", + "output": "log predict info config.toml: Whether to show detailed predict information in logs.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to show detailed predict information in logs.: ", + "output": "log predict info config.toml: Whether to show detailed predict information in logs.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting log_predict_info", + "output": "log predict info config.toml: Whether to show detailed predict information in logs.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting log_predict_info", + "output": "log predict info config.toml: Whether to show detailed predict information in logs.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does log_fit_info do? : log fit info config.toml: Whether to show detailed fit information in logs.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain log_fit_info. 
: log fit info config.toml: Whether to show detailed fit information in logs.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "log_fit_info", + "output": "log fit info config.toml: Whether to show detailed fit information in logs.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "log_fit_info", + "output": "log fit info config.toml: Whether to show detailed fit information in logs.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "log fit info", + "output": "log fit info config.toml: Whether to show detailed fit information in logs.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to show detailed fit information in logs.: ", + "output": "log fit info config.toml: Whether to show detailed fit information in logs.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting log_fit_info", + "output": "log fit info config.toml: Whether to show detailed fit information in logs.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting log_fit_info", + "output": "log fit info config.toml: Whether to show detailed fit information in logs.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does stalled_time_kill_ref do? : stalled time kill ref config.toml: Amount of time to stall (in seconds) before killing the job (assumes it hung). Reference time is scaled by train data shape of rows * cols to get used stalled_time_kill" + }, + { + "prompt_type": "plain", + "instruction": ": Explain stalled_time_kill_ref. : stalled time kill ref config.toml: Amount of time to stall (in seconds) before killing the job (assumes it hung). 
Reference time is scaled by train data shape of rows * cols to get used stalled_time_kill" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stalled_time_kill_ref", + "output": "stalled time kill ref config.toml: Amount of time to stall (in seconds) before killing the job (assumes it hung). Reference time is scaled by train data shape of rows * cols to get used stalled_time_kill" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stalled_time_kill_ref", + "output": "stalled time kill ref config.toml: Amount of time to stall (in seconds) before killing the job (assumes it hung). Reference time is scaled by train data shape of rows * cols to get used stalled_time_kill" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stalled time kill ref", + "output": "stalled time kill ref config.toml: Amount of time to stall (in seconds) before killing the job (assumes it hung). Reference time is scaled by train data shape of rows * cols to get used stalled_time_kill" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "stalled time kill ref config.toml: Amount of time to stall (in seconds) before killing the job (assumes it hung). Reference time is scaled by train data shape of rows * cols to get used stalled_time_kill" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting stalled_time_kill_ref", + "output": "stalled time kill ref config.toml: Amount of time to stall (in seconds) before killing the job (assumes it hung). 
Reference time is scaled by train data shape of rows * cols to get used stalled_time_kill" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting stalled_time_kill_ref", + "output": "stalled time kill ref config.toml: Amount of time to stall (in seconds) before killing the job (assumes it hung). Reference time is scaled by train data shape of rows * cols to get used stalled_time_kill" + }, + { + "prompt_type": "plain", + "instruction": ": What does long_time_psdump do? : long time psdump config.toml: Amount of time between checks for some process taking long time, every cycle full process list will be dumped to console or experiment logs if possible." + }, + { + "prompt_type": "plain", + "instruction": ": Explain long_time_psdump. : long time psdump config.toml: Amount of time between checks for some process taking long time, every cycle full process list will be dumped to console or experiment logs if possible." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "long_time_psdump", + "output": "long time psdump config.toml: Amount of time between checks for some process taking long time, every cycle full process list will be dumped to console or experiment logs if possible." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "long_time_psdump", + "output": "long time psdump config.toml: Amount of time between checks for some process taking long time, every cycle full process list will be dumped to console or experiment logs if possible." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "long time psdump", + "output": "long time psdump config.toml: Amount of time between checks for some process taking long time, every cycle full process list will be dumped to console or experiment logs if possible." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "long time psdump config.toml: Amount of time between checks for some process taking long time, every cycle full process list will be dumped to console or experiment logs if possible." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting long_time_psdump", + "output": "long time psdump config.toml: Amount of time between checks for some process taking long time, every cycle full process list will be dumped to console or experiment logs if possible." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting long_time_psdump", + "output": "long time psdump config.toml: Amount of time between checks for some process taking long time, every cycle full process list will be dumped to console or experiment logs if possible." + }, + { + "prompt_type": "plain", + "instruction": ": What does do_psdump do? : do psdump config.toml: Whether to dump ps every long_time_psdump" + }, + { + "prompt_type": "plain", + "instruction": ": Explain do_psdump. 
: do psdump config.toml: Whether to dump ps every long_time_psdump" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "do_psdump", + "output": "do psdump config.toml: Whether to dump ps every long_time_psdump" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "do_psdump", + "output": "do psdump config.toml: Whether to dump ps every long_time_psdump" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "do psdump", + "output": "do psdump config.toml: Whether to dump ps every long_time_psdump" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "do psdump config.toml: Whether to dump ps every long_time_psdump" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting do_psdump", + "output": "do psdump config.toml: Whether to dump ps every long_time_psdump" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting do_psdump", + "output": "do psdump config.toml: Whether to dump ps every long_time_psdump" + }, + { + "prompt_type": "plain", + "instruction": ": What does livelock_signal do? : livelock signal config.toml: Whether to check every long_time_psdump seconds and SIGUSR1 to all children to see where maybe stuck or taking long time." + }, + { + "prompt_type": "plain", + "instruction": ": Explain livelock_signal. : livelock signal config.toml: Whether to check every long_time_psdump seconds and SIGUSR1 to all children to see where maybe stuck or taking long time." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "livelock_signal", + "output": "livelock signal config.toml: Whether to check every long_time_psdump seconds and SIGUSR1 to all children to see where maybe stuck or taking long time." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "livelock_signal", + "output": "livelock signal config.toml: Whether to check every long_time_psdump seconds and SIGUSR1 to all children to see where maybe stuck or taking long time." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "livelock signal", + "output": "livelock signal config.toml: Whether to check every long_time_psdump seconds and SIGUSR1 to all children to see where maybe stuck or taking long time." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "livelock signal config.toml: Whether to check every long_time_psdump seconds and SIGUSR1 to all children to see where maybe stuck or taking long time." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting livelock_signal", + "output": "livelock signal config.toml: Whether to check every long_time_psdump seconds and SIGUSR1 to all children to see where maybe stuck or taking long time." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting livelock_signal", + "output": "livelock signal config.toml: Whether to check every long_time_psdump seconds and SIGUSR1 to all children to see where maybe stuck or taking long time." + }, + { + "prompt_type": "plain", + "instruction": ": What does num_cpu_sockets_override do? 
: num cpu sockets override config.toml: Value to override number of sockets, in case DAIs determination is wrong, for non-trivial systems. 0 means auto." + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_cpu_sockets_override. : num cpu sockets override config.toml: Value to override number of sockets, in case DAIs determination is wrong, for non-trivial systems. 0 means auto." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_cpu_sockets_override", + "output": "num cpu sockets override config.toml: Value to override number of sockets, in case DAIs determination is wrong, for non-trivial systems. 0 means auto." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_cpu_sockets_override", + "output": "num cpu sockets override config.toml: Value to override number of sockets, in case DAIs determination is wrong, for non-trivial systems. 0 means auto." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num cpu sockets override", + "output": "num cpu sockets override config.toml: Value to override number of sockets, in case DAIs determination is wrong, for non-trivial systems. 0 means auto." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "num cpu sockets override config.toml: Value to override number of sockets, in case DAIs determination is wrong, for non-trivial systems. 0 means auto." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_cpu_sockets_override", + "output": "num cpu sockets override config.toml: Value to override number of sockets, in case DAIs determination is wrong, for non-trivial systems. 0 means auto." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_cpu_sockets_override", + "output": "num cpu sockets override config.toml: Value to override number of sockets, in case DAIs determination is wrong, for non-trivial systems. 0 means auto." + }, + { + "prompt_type": "plain", + "instruction": ": What does num_gpus_override do? : num gpus override config.toml: Value to override number of GPUs, in case DAIs determination is wrong, for non-trivial systems. -1 means auto. Can also set min_num_cores_per_gpu=-1 to allow any number of GPUs for each experiment regardless of number of cores." + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_gpus_override. : num gpus override config.toml: Value to override number of GPUs, in case DAIs determination is wrong, for non-trivial systems. -1 means auto. Can also set min_num_cores_per_gpu=-1 to allow any number of GPUs for each experiment regardless of number of cores." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_gpus_override", + "output": "num gpus override config.toml: Value to override number of GPUs, in case DAIs determination is wrong, for non-trivial systems. -1 means auto. Can also set min_num_cores_per_gpu=-1 to allow any number of GPUs for each experiment regardless of number of cores." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_gpus_override", + "output": "num gpus override config.toml: Value to override number of GPUs, in case DAIs determination is wrong, for non-trivial systems. -1 means auto. Can also set min_num_cores_per_gpu=-1 to allow any number of GPUs for each experiment regardless of number of cores." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num gpus override", + "output": "num gpus override config.toml: Value to override number of GPUs, in case DAIs determination is wrong, for non-trivial systems. -1 means auto.Can also set min_num_cores_per_gpu=-1 to allowany number of GPUs for each experiment regardlessof number of cores." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "num gpus override config.toml: Value to override number of GPUs, in case DAIs determination is wrong, for non-trivial systems. -1 means auto.Can also set min_num_cores_per_gpu=-1 to allowany number of GPUs for each experiment regardlessof number of cores." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_gpus_override", + "output": "num gpus override config.toml: Value to override number of GPUs, in case DAIs determination is wrong, for non-trivial systems. -1 means auto.Can also set min_num_cores_per_gpu=-1 to allowany number of GPUs for each experiment regardlessof number of cores." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_gpus_override", + "output": "num gpus override config.toml: Value to override number of GPUs, in case DAIs determination is wrong, for non-trivial systems. -1 means auto.Can also set min_num_cores_per_gpu=-1 to allowany number of GPUs for each experiment regardlessof number of cores." + }, + { + "prompt_type": "plain", + "instruction": ": What does show_gpu_usage_only_if_locked do? : show gpu usage only if locked config.toml: Whether to show GPU usage only when locking. 'auto' means 'on' if num_gpus_override is different than actual total visible GPUs, else it means 'off'" + }, + { + "prompt_type": "plain", + "instruction": ": Explain show_gpu_usage_only_if_locked. 
: show gpu usage only if locked config.toml: Whether to show GPU usage only when locking. 'auto' means 'on' if num_gpus_override is different than actual total visible GPUs, else it means 'off'" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_gpu_usage_only_if_locked", + "output": "show gpu usage only if locked config.toml: Whether to show GPU usage only when locking. 'auto' means 'on' if num_gpus_override is different than actual total visible GPUs, else it means 'off'" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_gpu_usage_only_if_locked", + "output": "show gpu usage only if locked config.toml: Whether to show GPU usage only when locking. 'auto' means 'on' if num_gpus_override is different than actual total visible GPUs, else it means 'off'" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show gpu usage only if locked", + "output": "show gpu usage only if locked config.toml: Whether to show GPU usage only when locking. 'auto' means 'on' if num_gpus_override is different than actual total visible GPUs, else it means 'off'" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "show gpu usage only if locked config.toml: Whether to show GPU usage only when locking. 'auto' means 'on' if num_gpus_override is different than actual total visible GPUs, else it means 'off'" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting show_gpu_usage_only_if_locked", + "output": "show gpu usage only if locked config.toml: Whether to show GPU usage only when locking. 
'auto' means 'on' if num_gpus_override is different than actual total visible GPUs, else it means 'off'" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting show_gpu_usage_only_if_locked", + "output": "show gpu usage only if locked config.toml: Whether to show GPU usage only when locking. 'auto' means 'on' if num_gpus_override is different than actual total visible GPUs, else it means 'off'" + }, + { + "prompt_type": "plain", + "instruction": ": What does show_inapplicable_models_preview do? : show inapplicable models preview config.toml: Show inapplicable models in preview, to be sure not missing models one could have used" + }, + { + "prompt_type": "plain", + "instruction": ": Explain show_inapplicable_models_preview. : show inapplicable models preview config.toml: Show inapplicable models in preview, to be sure not missing models one could have used" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_inapplicable_models_preview", + "output": "show inapplicable models preview config.toml: Show inapplicable models in preview, to be sure not missing models one could have used" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_inapplicable_models_preview", + "output": "show inapplicable models preview config.toml: Show inapplicable models in preview, to be sure not missing models one could have used" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show inapplicable models preview", + "output": "show inapplicable models preview config.toml: Show inapplicable models in preview, to be sure not missing models one could have used" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "show 
inapplicable models preview config.toml: Show inapplicable models in preview, to be sure not missing models one could have used" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting show_inapplicable_models_preview", + "output": "show inapplicable models preview config.toml: Show inapplicable models in preview, to be sure not missing models one could have used" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting show_inapplicable_models_preview", + "output": "show inapplicable models preview config.toml: Show inapplicable models in preview, to be sure not missing models one could have used" + }, + { + "prompt_type": "plain", + "instruction": ": What does show_inapplicable_transformers_preview do? : show inapplicable transformers preview config.toml: Show inapplicable transformers in preview, to be sure not missing transformers one could have used" + }, + { + "prompt_type": "plain", + "instruction": ": Explain show_inapplicable_transformers_preview. 
: show inapplicable transformers preview config.toml: Show inapplicable transformers in preview, to be sure not missing transformers one could have used" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_inapplicable_transformers_preview", + "output": "show inapplicable transformers preview config.toml: Show inapplicable transformers in preview, to be sure not missing transformers one could have used" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_inapplicable_transformers_preview", + "output": "show inapplicable transformers preview config.toml: Show inapplicable transformers in preview, to be sure not missing transformers one could have used" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show inapplicable transformers preview", + "output": "show inapplicable transformers preview config.toml: Show inapplicable transformers in preview, to be sure not missing transformers one could have used" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "show inapplicable transformers preview config.toml: Show inapplicable transformers in preview, to be sure not missing transformers one could have used" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting show_inapplicable_transformers_preview", + "output": "show inapplicable transformers preview config.toml: Show inapplicable transformers in preview, to be sure not missing transformers one could have used" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting show_inapplicable_transformers_preview", + "output": "show inapplicable transformers preview config.toml: Show inapplicable 
transformers in preview, to be sure not missing transformers one could have used" + }, + { + "prompt_type": "plain", + "instruction": ": What does show_warnings_preview do? : show warnings preview config.toml: Show warnings for models (image auto, Dask multinode/multi-GPU) if conditions are met to use but not chosen to avoid missing models that could benefit accuracy/performance" + }, + { + "prompt_type": "plain", + "instruction": ": Explain show_warnings_preview. : show warnings preview config.toml: Show warnings for models (image auto, Dask multinode/multi-GPU) if conditions are met to use but not chosen to avoid missing models that could benefit accuracy/performance" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_warnings_preview", + "output": "show warnings preview config.toml: Show warnings for models (image auto, Dask multinode/multi-GPU) if conditions are met to use but not chosen to avoid missing models that could benefit accuracy/performance" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_warnings_preview", + "output": "show warnings preview config.toml: Show warnings for models (image auto, Dask multinode/multi-GPU) if conditions are met to use but not chosen to avoid missing models that could benefit accuracy/performance" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show warnings preview", + "output": "show warnings preview config.toml: Show warnings for models (image auto, Dask multinode/multi-GPU) if conditions are met to use but not chosen to avoid missing models that could benefit accuracy/performance" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "show warnings preview config.toml: Show warnings for models 
(image auto, Dask multinode/multi-GPU) if conditions are met to use but not chosen to avoid missing models that could benefit accuracy/performance" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting show_warnings_preview", + "output": "show warnings preview config.toml: Show warnings for models (image auto, Dask multinode/multi-GPU) if conditions are met to use but not chosen to avoid missing models that could benefit accuracy/performance" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting show_warnings_preview", + "output": "show warnings preview config.toml: Show warnings for models (image auto, Dask multinode/multi-GPU) if conditions are met to use but not chosen to avoid missing models that could benefit accuracy/performance" + }, + { + "prompt_type": "plain", + "instruction": ": What does show_warnings_preview_unused_map_features do? : show warnings preview unused map features config.toml: Show warnings for models that have no transformers for certain features." + }, + { + "prompt_type": "plain", + "instruction": ": Explain show_warnings_preview_unused_map_features. : show warnings preview unused map features config.toml: Show warnings for models that have no transformers for certain features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_warnings_preview_unused_map_features", + "output": "show warnings preview unused map features config.toml: Show warnings for models that have no transformers for certain features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_warnings_preview_unused_map_features", + "output": "show warnings preview unused map features config.toml: Show warnings for models that have no transformers for certain features." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show warnings preview unused map features", + "output": "show warnings preview unused map features config.toml: Show warnings for models that have no transformers for certain features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "show warnings preview unused map features config.toml: Show warnings for models that have no transformers for certain features." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting show_warnings_preview_unused_map_features", + "output": "show warnings preview unused map features config.toml: Show warnings for models that have no transformers for certain features." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting show_warnings_preview_unused_map_features", + "output": "show warnings preview unused map features config.toml: Show warnings for models that have no transformers for certain features." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_cols_show_unused_features do? : max cols show unused features config.toml: Up to how many input features to determine, during GUI/client preview, unused features. Too many slows preview down." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_cols_show_unused_features. : max cols show unused features config.toml: Up to how many input features to determine, during GUI/client preview, unused features. Too many slows preview down." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cols_show_unused_features", + "output": "max cols show unused features config.toml: Up to how many input features to determine, during GUI/client preview, unused features. 
Too many slows preview down." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cols_show_unused_features", + "output": "max cols show unused features config.toml: Up to how many input features to determine, during GUI/client preview, unused features. Too many slows preview down." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max cols show unused features", + "output": "max cols show unused features config.toml: Up to how many input features to determine, during GUI/client preview, unused features. Too many slows preview down." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max cols show unused features config.toml: Up to how many input features to determine, during GUI/client preview, unused features. Too many slows preview down." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_cols_show_unused_features", + "output": "max cols show unused features config.toml: Up to how many input features to determine, during GUI/client preview, unused features. Too many slows preview down." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_cols_show_unused_features", + "output": "max cols show unused features config.toml: Up to how many input features to determine, during GUI/client preview, unused features. Too many slows preview down." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_cols_show_feature_transformer_mapping do? : max cols show feature transformer mapping config.toml: Up to how many input features to show transformers used for each input feature." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_cols_show_feature_transformer_mapping. 
: max cols show feature transformer mapping config.toml: Up to how many input features to show transformers used for each input feature." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cols_show_feature_transformer_mapping", + "output": "max cols show feature transformer mapping config.toml: Up to how many input features to show transformers used for each input feature." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cols_show_feature_transformer_mapping", + "output": "max cols show feature transformer mapping config.toml: Up to how many input features to show transformers used for each input feature." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max cols show feature transformer mapping", + "output": "max cols show feature transformer mapping config.toml: Up to how many input features to show transformers used for each input feature." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max cols show feature transformer mapping config.toml: Up to how many input features to show transformers used for each input feature." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_cols_show_feature_transformer_mapping", + "output": "max cols show feature transformer mapping config.toml: Up to how many input features to show transformers used for each input feature." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_cols_show_feature_transformer_mapping", + "output": "max cols show feature transformer mapping config.toml: Up to how many input features to show transformers used for each input feature." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does warning_unused_feature_show_max do? : warning unused feature show max config.toml: Up to how many input features to show, in preview, that are unused features." + }, + { + "prompt_type": "plain", + "instruction": ": Explain warning_unused_feature_show_max. : warning unused feature show max config.toml: Up to how many input features to show, in preview, that are unused features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "warning_unused_feature_show_max", + "output": "warning unused feature show max config.toml: Up to how many input features to show, in preview, that are unused features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "warning_unused_feature_show_max", + "output": "warning unused feature show max config.toml: Up to how many input features to show, in preview, that are unused features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "warning unused feature show max", + "output": "warning unused feature show max config.toml: Up to how many input features to show, in preview, that are unused features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "warning unused feature show max config.toml: Up to how many input features to show, in preview, that are unused features." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting warning_unused_feature_show_max", + "output": "warning unused feature show max config.toml: Up to how many input features to show, in preview, that are unused features." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting warning_unused_feature_show_max", + "output": "warning unused feature show max config.toml: Up to how many input features to show, in preview, that are unused features." + }, + { + "prompt_type": "plain", + "instruction": ": What does interaction_finder_gini_rel_improvement_threshold do? : interaction finder gini rel improvement threshold config.toml: Required GINI relative improvement for InteractionTransformer. If GINI is not better than this relative improvement compared to original features considered in the interaction, then the interaction is not returned. If noisy data, and no clear signal in interactions but still want interactions, then can decrease this number." + }, + { + "prompt_type": "plain", + "instruction": ": Explain interaction_finder_gini_rel_improvement_threshold. : interaction finder gini rel improvement threshold config.toml: Required GINI relative improvement for InteractionTransformer. If GINI is not better than this relative improvement compared to original features considered in the interaction, then the interaction is not returned. If noisy data, and no clear signal in interactions but still want interactions, then can decrease this number." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Required GINI relative improvement for Interactions: . : Set the interaction finder gini rel improvement threshold config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "interaction_finder_gini_rel_improvement_threshold", + "output": "interaction finder gini rel improvement threshold config.toml: Required GINI relative improvement for InteractionTransformer. If GINI is not better than this relative improvement compared to original features considered in the interaction, then the interaction is not returned. 
If noisy data, and no clear signal in interactions but still want interactions, then can decrease this number." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "interaction_finder_gini_rel_improvement_threshold", + "output": "interaction finder gini rel improvement threshold config.toml: Required GINI relative improvement for Interactions: Required GINI relative improvement for InteractionTransformer. If GINI is not better than this relative improvement compared to original features considered in the interaction, then the interaction is not returned. If noisy data, and no clear signal in interactions but still want interactions, then can decrease this number." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "interaction finder gini rel improvement threshold", + "output": "interaction finder gini rel improvement threshold config.toml: Required GINI relative improvement for Interactions: Required GINI relative improvement for InteractionTransformer. If GINI is not better than this relative improvement compared to original features considered in the interaction, then the interaction is not returned. If noisy data, and no clear signal in interactions but still want interactions, then can decrease this number." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Required GINI relative improvement for Interactions: ", + "output": "interaction finder gini rel improvement threshold config.toml: Required GINI relative improvement for Interactions: Required GINI relative improvement for InteractionTransformer. If GINI is not better than this relative improvement compared to original features considered in the interaction, then the interaction is not returned. 
If noisy data, and no clear signal in interactions but still want interactions, then can decrease this number." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting interaction_finder_gini_rel_improvement_threshold", + "output": "interaction finder gini rel improvement threshold config.toml: Required GINI relative improvement for InteractionTransformer. If GINI is not better than this relative improvement compared to original features considered in the interaction, then the interaction is not returned. If noisy data, and no clear signal in interactions but still want interactions, then can decrease this number." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting interaction_finder_gini_rel_improvement_threshold", + "output": "interaction finder gini rel improvement threshold config.toml: Required GINI relative improvement for Interactions: Required GINI relative improvement for InteractionTransformer. If GINI is not better than this relative improvement compared to original features considered in the interaction, then the interaction is not returned. If noisy data, and no clear signal in interactions but still want interactions, then can decrease this number." + }, + { + "prompt_type": "plain", + "instruction": ": What does interaction_finder_return_limit do? : interaction finder return limit config.toml: Number of transformed Interactions to make as best out of many generated trial interactions." + }, + { + "prompt_type": "plain", + "instruction": ": Explain interaction_finder_return_limit. : interaction finder return limit config.toml: Number of transformed Interactions to make as best out of many generated trial interactions." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of transformed Interactions to make: . 
: Set the interaction finder return limit config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "interaction_finder_return_limit", + "output": "interaction finder return limit config.toml: Number of transformed Interactions to make as best out of many generated trial interactions." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "interaction_finder_return_limit", + "output": "interaction finder return limit config.toml: Number of transformed Interactions to make: Number of transformed Interactions to make as best out of many generated trial interactions." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "interaction finder return limit", + "output": "interaction finder return limit config.toml: Number of transformed Interactions to make: Number of transformed Interactions to make as best out of many generated trial interactions." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of transformed Interactions to make: ", + "output": "interaction finder return limit config.toml: Number of transformed Interactions to make: Number of transformed Interactions to make as best out of many generated trial interactions." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting interaction_finder_return_limit", + "output": "interaction finder return limit config.toml: Number of transformed Interactions to make as best out of many generated trial interactions." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting interaction_finder_return_limit", + "output": "interaction finder return limit config.toml: Number of transformed Interactions to make: Number of transformed Interactions to make as best out of many generated trial interactions." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_bootstrap do? : enable bootstrap config.toml: Whether to enable bootstrap sampling. Provides error bars to validation and test scores based on the standard error of the bootstrap mean." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_bootstrap. : enable bootstrap config.toml: Whether to enable bootstrap sampling. Provides error bars to validation and test scores based on the standard error of the bootstrap mean." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to enable bootstrap sampling for validation and test scores.: . : Set the enable bootstrap config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_bootstrap", + "output": "enable bootstrap config.toml: Whether to enable bootstrap sampling. Provides error bars to validation and test scores based on the standard error of the bootstrap mean." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_bootstrap", + "output": "enable bootstrap config.toml: Whether to enable bootstrap sampling for validation and test scores.: Whether to enable bootstrap sampling. Provides error bars to validation and test scores based on the standard error of the bootstrap mean." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable bootstrap", + "output": "enable bootstrap config.toml: Whether to enable bootstrap sampling for validation and test scores.: Whether to enable bootstrap sampling. Provides error bars to validation and test scores based on the standard error of the bootstrap mean." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to enable bootstrap sampling for validation and test scores.: ", + "output": "enable bootstrap config.toml: Whether to enable bootstrap sampling for validation and test scores.: Whether to enable bootstrap sampling. Provides error bars to validation and test scores based on the standard error of the bootstrap mean." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_bootstrap", + "output": "enable bootstrap config.toml: Whether to enable bootstrap sampling. Provides error bars to validation and test scores based on the standard error of the bootstrap mean." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_bootstrap", + "output": "enable bootstrap config.toml: Whether to enable bootstrap sampling for validation and test scores.: Whether to enable bootstrap sampling. Provides error bars to validation and test scores based on the standard error of the bootstrap mean." + }, + { + "prompt_type": "plain", + "instruction": ": What does min_bootstrap_samples do? 
: min bootstrap samples config.toml: Minimum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_bootstrap_samples. : min bootstrap samples config.toml: Minimum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Minimum number of bootstrap samples: . : Set the min bootstrap samples config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_bootstrap_samples", + "output": "min bootstrap samples config.toml: Minimum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_bootstrap_samples", + "output": "min bootstrap samples config.toml: Minimum number of bootstrap samples: Minimum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min bootstrap samples", + "output": "min 
bootstrap samples config.toml: Minimum number of bootstrap samples: Minimum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Minimum number of bootstrap samples: ", + "output": "min bootstrap samples config.toml: Minimum number of bootstrap samples: Minimum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_bootstrap_samples", + "output": "min bootstrap samples config.toml: Minimum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_bootstrap_samples", + "output": "min bootstrap samples config.toml: Minimum number of bootstrap samples: Minimum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_bootstrap_samples do? 
: max bootstrap samples config.toml: Maximum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_bootstrap_samples. : max bootstrap samples config.toml: Maximum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum number of bootstrap samples: . : Set the max bootstrap samples config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_bootstrap_samples", + "output": "max bootstrap samples config.toml: Maximum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_bootstrap_samples", + "output": "max bootstrap samples config.toml: Maximum number of bootstrap samples: Maximum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max bootstrap samples", + "output": "max 
bootstrap samples config.toml: Maximum number of bootstrap samples: Maximum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of bootstrap samples: ", + "output": "max bootstrap samples config.toml: Maximum number of bootstrap samples: Maximum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_bootstrap_samples", + "output": "max bootstrap samples config.toml: Maximum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_bootstrap_samples", + "output": "max bootstrap samples config.toml: Maximum number of bootstrap samples: Maximum number of bootstrap samples to use for estimating score and its standard deviation Actual number of bootstrap samples will vary between the min and max, depending upon row count (more rows, fewer samples) and accuracy settings (higher accuracy, more samples) " + }, + { + "prompt_type": "plain", + "instruction": ": What does min_bootstrap_sample_size_factor do? 
: min bootstrap sample size factor config.toml: Minimum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_bootstrap_sample_size_factor. : min bootstrap sample size factor config.toml: Minimum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Minimum fraction of rows to use for bootstrap samples: . : Set the min bootstrap sample size factor config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_bootstrap_sample_size_factor", + "output": "min bootstrap sample size factor config.toml: Minimum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_bootstrap_sample_size_factor", + "output": "min bootstrap sample size factor config.toml: Minimum fraction of rows to use for bootstrap samples: Minimum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + 
"prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min bootstrap sample size factor", + "output": "min bootstrap sample size factor config.toml: Minimum fraction of rows to use for bootstrap samples: Minimum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Minimum fraction of rows to use for bootstrap samples: ", + "output": "min bootstrap sample size factor config.toml: Minimum fraction of rows to use for bootstrap samples: Minimum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_bootstrap_sample_size_factor", + "output": "min bootstrap sample size factor config.toml: Minimum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_bootstrap_sample_size_factor", + "output": "min bootstrap sample size factor config.toml: Minimum fraction of rows to use for bootstrap samples: Minimum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between 
the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_bootstrap_sample_size_factor do? : max bootstrap sample size factor config.toml: Maximum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_bootstrap_sample_size_factor. : max bootstrap sample size factor config.toml: Maximum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum fraction of rows to use for bootstrap samples: . 
: Set the max bootstrap sample size factor config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_bootstrap_sample_size_factor", + "output": "max bootstrap sample size factor config.toml: Maximum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_bootstrap_sample_size_factor", + "output": "max bootstrap sample size factor config.toml: Maximum fraction of rows to use for bootstrap samples: Maximum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max bootstrap sample size factor", + "output": "max bootstrap sample size factor config.toml: Maximum fraction of rows to use for bootstrap samples: Maximum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum fraction of rows to use for bootstrap samples: ", + "output": "max bootstrap sample size factor config.toml: Maximum fraction of rows to use for bootstrap samples: Maximum fraction of row size to take as 
sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_bootstrap_sample_size_factor", + "output": "max bootstrap sample size factor config.toml: Maximum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_bootstrap_sample_size_factor", + "output": "max bootstrap sample size factor config.toml: Maximum fraction of rows to use for bootstrap samples: Maximum fraction of row size to take as sample size for bootstrap estimator Actual sample size used for bootstrap estimate will vary between the min and max, depending upon row count (more rows, smaller sample size) and accuracy settings (higher accuracy, larger sample size) " + }, + { + "prompt_type": "plain", + "instruction": ": What does bootstrap_final_seed do? : bootstrap final seed config.toml: Seed to use for final model bootstrap sampling, -1 means use experiment-derived seed. E.g. one can retrain final model with different seed to get different final model error bars for scores. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain bootstrap_final_seed. : bootstrap final seed config.toml: Seed to use for final model bootstrap sampling, -1 means use experiment-derived seed. E.g. one can retrain final model with different seed to get different final model error bars for scores. 
" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Seed to use for final model bootstrap sampling: . : Set the bootstrap final seed config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bootstrap_final_seed", + "output": "bootstrap final seed config.toml: Seed to use for final model bootstrap sampling, -1 means use experiment-derived seed. E.g. one can retrain final model with different seed to get different final model error bars for scores. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bootstrap_final_seed", + "output": "bootstrap final seed config.toml: Seed to use for final model bootstrap sampling: Seed to use for final model bootstrap sampling, -1 means use experiment-derived seed. E.g. one can retrain final model with different seed to get different final model error bars for scores. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bootstrap final seed", + "output": "bootstrap final seed config.toml: Seed to use for final model bootstrap sampling: Seed to use for final model bootstrap sampling, -1 means use experiment-derived seed. E.g. one can retrain final model with different seed to get different final model error bars for scores. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Seed to use for final model bootstrap sampling: ", + "output": "bootstrap final seed config.toml: Seed to use for final model bootstrap sampling: Seed to use for final model bootstrap sampling, -1 means use experiment-derived seed. E.g. one can retrain final model with different seed to get different final model error bars for scores. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting bootstrap_final_seed", + "output": "bootstrap final seed config.toml: Seed to use for final model bootstrap sampling, -1 means use experiment-derived seed. E.g. one can retrain final model with different seed to get different final model error bars for scores. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting bootstrap_final_seed", + "output": "bootstrap final seed config.toml: Seed to use for final model bootstrap sampling: Seed to use for final model bootstrap sampling, -1 means use experiment-derived seed. E.g. one can retrain final model with different seed to get different final model error bars for scores. " + }, + { + "prompt_type": "plain", + "instruction": ": What does benford_mad_threshold_int do? : benford mad threshold int config.toml: Benford's law: mean absolute deviance threshold equal and above which integer valued columns are treated as categoricals too" + }, + { + "prompt_type": "plain", + "instruction": ": Explain benford_mad_threshold_int. 
: benford mad threshold int config.toml: Benford's law: mean absolute deviance threshold equal and above which integer valued columns are treated as categoricals too" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benford_mad_threshold_int", + "output": "benford mad threshold int config.toml: Benford's law: mean absolute deviance threshold equal and above which integer valued columns are treated as categoricals too" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benford_mad_threshold_int", + "output": "benford mad threshold int config.toml: Benford's law: mean absolute deviance threshold equal and above which integer valued columns are treated as categoricals too" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benford mad threshold int", + "output": "benford mad threshold int config.toml: Benford's law: mean absolute deviance threshold equal and above which integer valued columns are treated as categoricals too" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "benford mad threshold int config.toml: Benford's law: mean absolute deviance threshold equal and above which integer valued columns are treated as categoricals too" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting benford_mad_threshold_int", + "output": "benford mad threshold int config.toml: Benford's law: mean absolute deviance threshold equal and above which integer valued columns are treated as categoricals too" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting benford_mad_threshold_int", + "output": "benford mad threshold int config.toml: Benford's law: mean 
absolute deviance threshold equal and above which integer valued columns are treated as categoricals too" + }, + { + "prompt_type": "plain", + "instruction": ": What does benford_mad_threshold_real do? : benford mad threshold real config.toml: Benford's law: mean absolute deviance threshold equal and above which real valued columns are treated as categoricals too" + }, + { + "prompt_type": "plain", + "instruction": ": Explain benford_mad_threshold_real. : benford mad threshold real config.toml: Benford's law: mean absolute deviance threshold equal and above which real valued columns are treated as categoricals too" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benford_mad_threshold_real", + "output": "benford mad threshold real config.toml: Benford's law: mean absolute deviance threshold equal and above which real valued columns are treated as categoricals too" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benford_mad_threshold_real", + "output": "benford mad threshold real config.toml: Benford's law: mean absolute deviance threshold equal and above which real valued columns are treated as categoricals too" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benford mad threshold real", + "output": "benford mad threshold real config.toml: Benford's law: mean absolute deviance threshold equal and above which real valued columns are treated as categoricals too" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "benford mad threshold real config.toml: Benford's law: mean absolute deviance threshold equal and above which real valued columns are treated as categoricals too" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short 
explanation of the expert setting benford_mad_threshold_real", + "output": "benford mad threshold real config.toml: Benford's law: mean absolute deviance threshold equal and above which real valued columns are treated as categoricals too" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting benford_mad_threshold_real", + "output": "benford mad threshold real config.toml: Benford's law: mean absolute deviance threshold equal and above which real valued columns are treated as categoricals too" + }, + { + "prompt_type": "plain", + "instruction": ": What does varimp_threshold_at_interpretability_10 do? : varimp threshold at interpretability 10 config.toml: Variable importance below which feature is dropped (with possible replacement found that is better) This also sets overall scale for lower interpretability settings. Set to lower value if ok with many weak features despite choosing high interpretability, or if see drop in performance due to the need for weak features. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain varimp_threshold_at_interpretability_10. : varimp threshold at interpretability 10 config.toml: Variable importance below which feature is dropped (with possible replacement found that is better) This also sets overall scale for lower interpretability settings. Set to lower value if ok with many weak features despite choosing high interpretability, or if see drop in performance due to the need for weak features. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Lowest allowed variable importance at interpretability 10: . 
: Set the varimp threshold at interpretability 10 config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "varimp_threshold_at_interpretability_10", + "output": "varimp threshold at interpretability 10 config.toml: Variable importance below which feature is dropped (with possible replacement found that is better) This also sets overall scale for lower interpretability settings. Set to lower value if ok with many weak features despite choosing high interpretability, or if see drop in performance due to the need for weak features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "varimp_threshold_at_interpretability_10", + "output": "varimp threshold at interpretability 10 config.toml: Lowest allowed variable importance at interpretability 10: Variable importance below which feature is dropped (with possible replacement found that is better) This also sets overall scale for lower interpretability settings. Set to lower value if ok with many weak features despite choosing high interpretability, or if see drop in performance due to the need for weak features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "varimp threshold at interpretability 10", + "output": "varimp threshold at interpretability 10 config.toml: Lowest allowed variable importance at interpretability 10: Variable importance below which feature is dropped (with possible replacement found that is better) This also sets overall scale for lower interpretability settings. Set to lower value if ok with many weak features despite choosing high interpretability, or if see drop in performance due to the need for weak features. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Lowest allowed variable importance at interpretability 10: ", + "output": "varimp threshold at interpretability 10 config.toml: Lowest allowed variable importance at interpretability 10: Variable importance below which feature is dropped (with possible replacement found that is better) This also sets overall scale for lower interpretability settings. Set to lower value if ok with many weak features despite choosing high interpretability, or if see drop in performance due to the need for weak features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting varimp_threshold_at_interpretability_10", + "output": "varimp threshold at interpretability 10 config.toml: Variable importance below which feature is dropped (with possible replacement found that is better) This also sets overall scale for lower interpretability settings. Set to lower value if ok with many weak features despite choosing high interpretability, or if see drop in performance due to the need for weak features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting varimp_threshold_at_interpretability_10", + "output": "varimp threshold at interpretability 10 config.toml: Lowest allowed variable importance at interpretability 10: Variable importance below which feature is dropped (with possible replacement found that is better) This also sets overall scale for lower interpretability settings. Set to lower value if ok with many weak features despite choosing high interpretability, or if see drop in performance due to the need for weak features. " + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_stabilize_varimp_for_ts do? 
: allow stabilize varimp for ts config.toml: Whether to avoid setting stabilize_varimp=false and stabilize_fs=false for time series experiments." + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_stabilize_varimp_for_ts. : allow stabilize varimp for ts config.toml: Whether to avoid setting stabilize_varimp=false and stabilize_fs=false for time series experiments." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to allow stabilization of features using variable importance for time-series: . : Set the allow stabilize varimp for ts config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_stabilize_varimp_for_ts", + "output": "allow stabilize varimp for ts config.toml: Whether to avoid setting stabilize_varimp=false and stabilize_fs=false for time series experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_stabilize_varimp_for_ts", + "output": "allow stabilize varimp for ts config.toml: Whether to allow stabilization of features using variable importance for time-series: Whether to avoid setting stabilize_varimp=false and stabilize_fs=false for time series experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow stabilize varimp for ts", + "output": "allow stabilize varimp for ts config.toml: Whether to allow stabilization of features using variable importance for time-series: Whether to avoid setting stabilize_varimp=false and stabilize_fs=false for time series experiments." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to allow stabilization of features using variable importance for time-series: ", + "output": "allow stabilize varimp for ts config.toml: Whether to allow stabilization of features using variable importance for time-series: Whether to avoid setting stabilize_varimp=false and stabilize_fs=false for time series experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_stabilize_varimp_for_ts", + "output": "allow stabilize varimp for ts config.toml: Whether to avoid setting stabilize_varimp=false and stabilize_fs=false for time series experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allow_stabilize_varimp_for_ts", + "output": "allow stabilize varimp for ts config.toml: Whether to allow stabilization of features using variable importance for time-series: Whether to avoid setting stabilize_varimp=false and stabilize_fs=false for time series experiments." + }, + { + "prompt_type": "plain", + "instruction": ": What does stabilize_varimp do? : stabilize varimp config.toml: Variable importance is used by genetic algorithm to decide which features are useful, so this can stabilize the feature selection by the genetic algorithm. This is by default disabled for time series experiments, which can have real diverse behavior in each split. But in some cases feature selection is improved in presence of highly shifted variables that are not handled by lag transformers and one can set allow_stabilize_varimp_for_ts=true. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain stabilize_varimp. : stabilize varimp config.toml: Variable importance is used by genetic algorithm to decide which features are useful, so this can stabilize the feature selection by the genetic algorithm. 
This is by default disabled for time series experiments, which can have real diverse behavior in each split. But in some cases feature selection is improved in presence of highly shifted variables that are not handled by lag transformers and one can set allow_stabilize_varimp_for_ts=true. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to take minimum (True) or mean (False) of variable importance when have multiple folds/repeats.: . : Set the stabilize varimp config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stabilize_varimp", + "output": "stabilize varimp config.toml: Variable importance is used by genetic algorithm to decide which features are useful, so this can stabilize the feature selection by the genetic algorithm. This is by default disabled for time series experiments, which can have real diverse behavior in each split. But in some cases feature selection is improved in presence of highly shifted variables that are not handled by lag transformers and one can set allow_stabilize_varimp_for_ts=true. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stabilize_varimp", + "output": "stabilize varimp config.toml: Whether to take minimum (True) or mean (False) of variable importance when have multiple folds/repeats.: Variable importance is used by genetic algorithm to decide which features are useful, so this can stabilize the feature selection by the genetic algorithm. This is by default disabled for time series experiments, which can have real diverse behavior in each split. But in some cases feature selection is improved in presence of highly shifted variables that are not handled by lag transformers and one can set allow_stabilize_varimp_for_ts=true. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stabilize varimp", + "output": "stabilize varimp config.toml: Whether to take minimum (True) or mean (False) of variable importance when have multiple folds/repeats.: Variable importance is used by genetic algorithm to decide which features are useful, so this can stabilize the feature selection by the genetic algorithm. This is by default disabled for time series experiments, which can have real diverse behavior in each split. But in some cases feature selection is improved in presence of highly shifted variables that are not handled by lag transformers and one can set allow_stabilize_varimp_for_ts=true. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to take minimum (True) or mean (False) of variable importance when have multiple folds/repeats.: ", + "output": "stabilize varimp config.toml: Whether to take minimum (True) or mean (False) of variable importance when have multiple folds/repeats.: Variable importance is used by genetic algorithm to decide which features are useful, so this can stabilize the feature selection by the genetic algorithm. This is by default disabled for time series experiments, which can have real diverse behavior in each split. But in some cases feature selection is improved in presence of highly shifted variables that are not handled by lag transformers and one can set allow_stabilize_varimp_for_ts=true. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting stabilize_varimp", + "output": "stabilize varimp config.toml: Variable importance is used by genetic algorithm to decide which features are useful, so this can stabilize the feature selection by the genetic algorithm. 
This is by default disabled for time series experiments, which can have real diverse behavior in each split. But in some cases feature selection is improved in presence of highly shifted variables that are not handled by lag transformers and one can set allow_stabilize_varimp_for_ts=true. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting stabilize_varimp", + "output": "stabilize varimp config.toml: Whether to take minimum (True) or mean (False) of variable importance when have multiple folds/repeats.: Variable importance is used by genetic algorithm to decide which features are useful, so this can stabilize the feature selection by the genetic algorithm. This is by default disabled for time series experiments, which can have real diverse behavior in each split. But in some cases feature selection is improved in presence of highly shifted variables that are not handled by lag transformers and one can set allow_stabilize_varimp_for_ts=true. " + }, + { + "prompt_type": "plain", + "instruction": ": What does stabilize_fs do? : stabilize fs config.toml: Whether to take minimum (True) or mean (False) of delta improvement in score when aggregating feature selection scores across multiple folds/depths. Delta improvement of score corresponds to original metric minus metric of shuffled feature frame if maximizing metric, and corresponds to negative of such a score difference if minimizing. Feature selection by permutation importance considers the change in score after shuffling a feature, and using minimum operation ignores optimistic scores in favor of pessimistic scores when aggregating over folds. Note, if using tree methods, multiple depths may be fitted, in which case regardless of this toml setting, only features that are kept for all depths are kept by feature selection. 
If interpretability >= config toml value of fs_data_vary_for_interpretability, then half data (or setting of fs_data_frac) is used as another fit, in which case regardless of this toml setting, only features that are kept for all data sizes are kept by feature selection. Note: This is disabled for small data since arbitrary slices of small data can lead to disjoint features being important and only aggregated average behavior has signal. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain stabilize_fs. : stabilize fs config.toml: Whether to take minimum (True) or mean (False) of delta improvement in score when aggregating feature selection scores across multiple folds/depths. Delta improvement of score corresponds to original metric minus metric of shuffled feature frame if maximizing metric, and corresponds to negative of such a score difference if minimizing. Feature selection by permutation importance considers the change in score after shuffling a feature, and using minimum operation ignores optimistic scores in favor of pessimistic scores when aggregating over folds. Note, if using tree methods, multiple depths may be fitted, in which case regardless of this toml setting, only features that are kept for all depths are kept by feature selection. If interpretability >= config toml value of fs_data_vary_for_interpretability, then half data (or setting of fs_data_frac) is used as another fit, in which case regardless of this toml setting, only features that are kept for all data sizes are kept by feature selection. Note: This is disabled for small data since arbitrary slices of small data can lead to disjoint features being important and only aggregated average behavior has signal. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to take minimum (True) or mean (False) of delta improvement in score when aggregating feature selection scores across multiple folds/depths.: . 
: Set the stabilize fs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stabilize_fs", + "output": "stabilize fs config.toml: Whether to take minimum (True) or mean (False) of delta improvement in score when aggregating feature selection scores across multiple folds/depths. Delta improvement of score corresponds to original metric minus metric of shuffled feature frame if maximizing metric, and corresponds to negative of such a score difference if minimizing. Feature selection by permutation importance considers the change in score after shuffling a feature, and using minimum operation ignores optimistic scores in favor of pessimistic scores when aggregating over folds. Note, if using tree methods, multiple depths may be fitted, in which case regardless of this toml setting, only features that are kept for all depths are kept by feature selection. If interpretability >= config toml value of fs_data_vary_for_interpretability, then half data (or setting of fs_data_frac) is used as another fit, in which case regardless of this toml setting, only features that are kept for all data sizes are kept by feature selection. Note: This is disabled for small data since arbitrary slices of small data can lead to disjoint features being important and only aggregated average behavior has signal. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stabilize_fs", + "output": "stabilize fs config.toml: Whether to take minimum (True) or mean (False) of delta improvement in score when aggregating feature selection scores across multiple folds/depths.: Whether to take minimum (True) or mean (False) of delta improvement in score when aggregating feature selection scores across multiple folds/depths. 
Delta improvement of score corresponds to original metric minus metric of shuffled feature frame if maximizing metric, and corresponds to negative of such a score difference if minimizing. Feature selection by permutation importance considers the change in score after shuffling a feature, and using minimum operation ignores optimistic scores in favor of pessimistic scores when aggregating over folds. Note, if using tree methods, multiple depths may be fitted, in which case regardless of this toml setting, only features that are kept for all depths are kept by feature selection. If interpretability >= config toml value of fs_data_vary_for_interpretability, then half data (or setting of fs_data_frac) is used as another fit, in which case regardless of this toml setting, only features that are kept for all data sizes are kept by feature selection. Note: This is disabled for small data since arbitrary slices of small data can lead to disjoint features being important and only aggregated average behavior has signal. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stabilize fs", + "output": "stabilize fs config.toml: Whether to take minimum (True) or mean (False) of delta improvement in score when aggregating feature selection scores across multiple folds/depths.: Whether to take minimum (True) or mean (False) of delta improvement in score when aggregating feature selection scores across multiple folds/depths. Delta improvement of score corresponds to original metric minus metric of shuffled feature frame if maximizing metric, and corresponds to negative of such a score difference if minimizing. Feature selection by permutation importance considers the change in score after shuffling a feature, and using minimum operation ignores optimistic scores in favor of pessimistic scores when aggregating over folds. 
Note, if using tree methods, multiple depths may be fitted, in which case regardless of this toml setting, only features that are kept for all depths are kept by feature selection. If interpretability >= config toml value of fs_data_vary_for_interpretability, then half data (or setting of fs_data_frac) is used as another fit, in which case regardless of this toml setting, only features that are kept for all data sizes are kept by feature selection. Note: This is disabled for small data since arbitrary slices of small data can lead to disjoint features being important and only aggregated average behavior has signal. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to take minimum (True) or mean (False) of delta improvement in score when aggregating feature selection scores across multiple folds/depths.: ", + "output": "stabilize fs config.toml: Whether to take minimum (True) or mean (False) of delta improvement in score when aggregating feature selection scores across multiple folds/depths.: Whether to take minimum (True) or mean (False) of delta improvement in score when aggregating feature selection scores across multiple folds/depths. Delta improvement of score corresponds to original metric minus metric of shuffled feature frame if maximizing metric, and corresponds to negative of such a score difference if minimizing. Feature selection by permutation importance considers the change in score after shuffling a feature, and using minimum operation ignores optimistic scores in favor of pessimistic scores when aggregating over folds. Note, if using tree methods, multiple depths may be fitted, in which case regardless of this toml setting, only features that are kept for all depths are kept by feature selection. 
If interpretability >= config toml value of fs_data_vary_for_interpretability, then half data (or setting of fs_data_frac) is used as another fit, in which case regardless of this toml setting, only features that are kept for all data sizes are kept by feature selection. Note: This is disabled for small data since arbitrary slices of small data can lead to disjoint features being important and only aggregated average behavior has signal. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting stabilize_fs", + "output": "stabilize fs config.toml: Whether to take minimum (True) or mean (False) of delta improvement in score when aggregating feature selection scores across multiple folds/depths. Delta improvement of score corresponds to original metric minus metric of shuffled feature frame if maximizing metric, and corresponds to negative of such a score difference if minimizing. Feature selection by permutation importance considers the change in score after shuffling a feature, and using minimum operation ignores optimistic scores in favor of pessimistic scores when aggregating over folds. Note, if using tree methods, multiple depths may be fitted, in which case regardless of this toml setting, only features that are kept for all depths are kept by feature selection. If interpretability >= config toml value of fs_data_vary_for_interpretability, then half data (or setting of fs_data_frac) is used as another fit, in which case regardless of this toml setting, only features that are kept for all data sizes are kept by feature selection. Note: This is disabled for small data since arbitrary slices of small data can lead to disjoint features being important and only aggregated average behavior has signal. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting stabilize_fs", + "output": "stabilize fs config.toml: Whether to take minimum (True) or mean (False) of delta improvement in score when aggregating feature selection scores across multiple folds/depths.: Whether to take minimum (True) or mean (False) of delta improvement in score when aggregating feature selection scores across multiple folds/depths. Delta improvement of score corresponds to original metric minus metric of shuffled feature frame if maximizing metric, and corresponds to negative of such a score difference if minimizing. Feature selection by permutation importance considers the change in score after shuffling a feature, and using minimum operation ignores optimistic scores in favor of pessimistic scores when aggregating over folds. Note, if using tree methods, multiple depths may be fitted, in which case regardless of this toml setting, only features that are kept for all depths are kept by feature selection. If interpretability >= config toml value of fs_data_vary_for_interpretability, then half data (or setting of fs_data_frac) is used as another fit, in which case regardless of this toml setting, only features that are kept for all data sizes are kept by feature selection. Note: This is disabled for small data since arbitrary slices of small data can lead to disjoint features being important and only aggregated average behavior has signal. " + }, + { + "prompt_type": "plain", + "instruction": ": What does stabilize_features do? : stabilize features config.toml: Whether final pipeline uses fixed features for some transformers that would normally perform search, such as InteractionsTransformer. Use what learned from tuning and evolution (True) or to freshly search for new features (False). This can give a more stable pipeline, especially for small data or when using interaction transformer as pretransformer in multi-layer pipeline. 
" + }, + { + "prompt_type": "plain", + "instruction": ": Explain stabilize_features. : stabilize features config.toml: Whether final pipeline uses fixed features for some transformers that would normally perform search, such as InteractionsTransformer. Use what learned from tuning and evolution (True) or to freshly search for new features (False). This can give a more stable pipeline, especially for small data or when using interaction transformer as pretransformer in multi-layer pipeline. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Use tuning-evolution search result for final model transformer.: . : Set the stabilize features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stabilize_features", + "output": "stabilize features config.toml: Whether final pipeline uses fixed features for some transformers that would normally perform search, such as InteractionsTransformer. Use what learned from tuning and evolution (True) or to freshly search for new features (False). This can give a more stable pipeline, especially for small data or when using interaction transformer as pretransformer in multi-layer pipeline. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stabilize_features", + "output": "stabilize features config.toml: Use tuning-evolution search result for final model transformer.: Whether final pipeline uses fixed features for some transformers that would normally perform search, such as InteractionsTransformer. Use what learned from tuning and evolution (True) or to freshly search for new features (False). This can give a more stable pipeline, especially for small data or when using interaction transformer as pretransformer in multi-layer pipeline. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stabilize features", + "output": "stabilize features config.toml: Use tuning-evolution search result for final model transformer.: Whether final pipeline uses fixed features for some transformers that would normally perform search, such as InteractionsTransformer. Use what learned from tuning and evolution (True) or to freshly search for new features (False). This can give a more stable pipeline, especially for small data or when using interaction transformer as pretransformer in multi-layer pipeline. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Use tuning-evolution search result for final model transformer.: ", + "output": "stabilize features config.toml: Use tuning-evolution search result for final model transformer.: Whether final pipeline uses fixed features for some transformers that would normally perform search, such as InteractionsTransformer. Use what learned from tuning and evolution (True) or to freshly search for new features (False). This can give a more stable pipeline, especially for small data or when using interaction transformer as pretransformer in multi-layer pipeline. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting stabilize_features", + "output": "stabilize features config.toml: Whether final pipeline uses fixed features for some transformers that would normally perform search, such as InteractionsTransformer. Use what learned from tuning and evolution (True) or to freshly search for new features (False). This can give a more stable pipeline, especially for small data or when using interaction transformer as pretransformer in multi-layer pipeline. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting stabilize_features", + "output": "stabilize features config.toml: Use tuning-evolution search result for final model transformer.: Whether final pipeline uses fixed features for some transformers that would normally perform search, such as InteractionsTransformer. Use what learned from tuning and evolution (True) or to freshly search for new features (False). This can give a more stable pipeline, especially for small data or when using interaction transformer as pretransformer in multi-layer pipeline. " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_rapids_transformers do? : enable rapids transformers config.toml: Whether to enable GPU-based RAPIDS cuML transformers. If you want to support Dask RAPIDS transformers, you must set enable_rapids_transformers_dask=true. No mojo support, but Python scoring is supported. In alpha testing status. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_rapids_transformers. : enable rapids transformers config.toml: Whether to enable GPU-based RAPIDS cuML transformers. If you want to support Dask RAPIDS transformers, you must set enable_rapids_transformers_dask=true. No mojo support, but Python scoring is supported. In alpha testing status. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to enable RAPIDS CUML GPU transformers (no mojo): . : Set the enable rapids transformers config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_rapids_transformers", + "output": "enable rapids transformers config.toml: Whether to enable GPU-based RAPIDS cuML transformers. If you want to support Dask RAPIDS transformers, you must set enable_rapids_transformers_dask=true. No mojo support, but Python scoring is supported. In alpha testing status. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_rapids_transformers", + "output": "enable rapids transformers config.toml: Whether to enable RAPIDS CUML GPU transformers (no mojo): Whether to enable GPU-based RAPIDS cuML transformers. If you want to support Dask RAPIDS transformers, you must set enable_rapids_transformers_dask=true. No mojo support, but Python scoring is supported. In alpha testing status. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable rapids transformers", + "output": "enable rapids transformers config.toml: Whether to enable RAPIDS CUML GPU transformers (no mojo): Whether to enable GPU-based RAPIDS cuML transformers. If you want to support Dask RAPIDS transformers, you must set enable_rapids_transformers_dask=true. No mojo support, but Python scoring is supported. In alpha testing status. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to enable RAPIDS CUML GPU transformers (no mojo): ", + "output": "enable rapids transformers config.toml: Whether to enable RAPIDS CUML GPU transformers (no mojo): Whether to enable GPU-based RAPIDS cuML transformers. If you want to support Dask RAPIDS transformers, you must set enable_rapids_transformers_dask=true. No mojo support, but Python scoring is supported. In alpha testing status. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_rapids_transformers", + "output": "enable rapids transformers config.toml: Whether to enable GPU-based RAPIDS cuML transformers. If you want to support Dask RAPIDS transformers, you must set enable_rapids_transformers_dask=true. No mojo support, but Python scoring is supported. In alpha testing status. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_rapids_transformers", + "output": "enable rapids transformers config.toml: Whether to enable RAPIDS CUML GPU transformers (no mojo): Whether to enable GPU-based RAPIDS cuML transformers. If you want to support Dask RAPIDS transformers, you must set enable_rapids_transformers_dask=true. No mojo support, but Python scoring is supported. In alpha testing status. " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_rapids_transformers_dask do? : enable rapids transformers dask config.toml: Whether to enable Multi-GPU mode for capable RAPIDS cuML transformers. Must also set enable_rapids_transformers=true. No mojo support, but Python scoring is supported. In alpha testing status. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_rapids_transformers_dask. : enable rapids transformers dask config.toml: Whether to enable Multi-GPU mode for capable RAPIDS cuML transformers. Must also set enable_rapids_transformers=true. No mojo support, but Python scoring is supported. In alpha testing status. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to enable RAPIDS CUML GPU transformers to use Dask (no mojo): . : Set the enable rapids transformers dask config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_rapids_transformers_dask", + "output": "enable rapids transformers dask config.toml: Whether to enable Multi-GPU mode for capable RAPIDS cuML transformers. Must also set enable_rapids_transformers=true. No mojo support, but Python scoring is supported. In alpha testing status. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_rapids_transformers_dask", + "output": "enable rapids transformers dask config.toml: Whether to enable RAPIDS CUML GPU transformers to use Dask (no mojo): Whether to enable Multi-GPU mode for capable RAPIDS cuML transformers. Must also set enable_rapids_transformers=true. No mojo support, but Python scoring is supported. In alpha testing status. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable rapids transformers dask", + "output": "enable rapids transformers dask config.toml: Whether to enable RAPIDS CUML GPU transformers to use Dask (no mojo): Whether to enable Multi-GPU mode for capable RAPIDS cuML transformers. Must also set enable_rapids_transformers=true. No mojo support, but Python scoring is supported. In alpha testing status. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to enable RAPIDS CUML GPU transformers to use Dask (no mojo): ", + "output": "enable rapids transformers dask config.toml: Whether to enable RAPIDS CUML GPU transformers to use Dask (no mojo): Whether to enable Multi-GPU mode for capable RAPIDS cuML transformers. Must also set enable_rapids_transformers=true. No mojo support, but Python scoring is supported. In alpha testing status. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_rapids_transformers_dask", + "output": "enable rapids transformers dask config.toml: Whether to enable Multi-GPU mode for capable RAPIDS cuML transformers. Must also set enable_rapids_transformers=true. No mojo support, but Python scoring is supported. In alpha testing status. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_rapids_transformers_dask", + "output": "enable rapids transformers dask config.toml: Whether to enable RAPIDS CUML GPU transformers to use Dask (no mojo): Whether to enable Multi-GPU mode for capable RAPIDS cuML transformers. Must also set enable_rapids_transformers=true. No mojo support, but Python scoring is supported. In alpha testing status. " + }, + { + "prompt_type": "plain", + "instruction": ": What does fraction_std_bootstrap_ladder_factor do? : fraction std bootstrap ladder factor config.toml: Factor of standard deviation of bootstrap scores by which to accept new model in genetic algorithm. Too small a fraction will lead to accepting new models easily even if no significant improvement in score, while too large a fraction will reject too many good models. Non-zero value is a bit risky when no folds are used in GA, because bootstrap score is only rough estimate of error.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain fraction_std_bootstrap_ladder_factor. : fraction std bootstrap ladder factor config.toml: Factor of standard deviation of bootstrap scores by which to accept new model in genetic algorithm. Too small a fraction will lead to accepting new models easily even if no significant improvement in score, while too large a fraction will reject too many good models. Non-zero value is a bit risky when no folds are used in GA, because bootstrap score is only rough estimate of error.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fraction_std_bootstrap_ladder_factor", + "output": "fraction std bootstrap ladder factor config.toml: Factor of standard deviation of bootstrap scores by which to accept new model in genetic algorithm. 
Too small a fraction will lead to accepting new models easily even if no significant improvement in score, while too large a fraction will reject too many good models. Non-zero value is a bit risky when no folds are used in GA, because bootstrap score is only rough estimate of error.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fraction_std_bootstrap_ladder_factor", + "output": "fraction std bootstrap ladder factor config.toml: Factor of standard deviation of bootstrap scores by which to accept new model in genetic algorithm. Too small a fraction will lead to accepting new models easily even if no significant improvement in score, while too large a fraction will reject too many good models. Non-zero value is a bit risky when no folds are used in GA, because bootstrap score is only rough estimate of error.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fraction std bootstrap ladder factor", + "output": "fraction std bootstrap ladder factor config.toml: Factor of standard deviation of bootstrap scores by which to accept new model in genetic algorithm. Too small a fraction will lead to accepting new models easily even if no significant improvement in score, while too large a fraction will reject too many good models. Non-zero value is a bit risky when no folds are used in GA, because bootstrap score is only rough estimate of error.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Factor of standard deviation of bootstrap scores by which to accept new model in genetic algorithm. Too small a fraction will lead to accepting new models easily even if no significant improvement in score, while too large a fraction will reject too many good models. 
Non-zero value is a bit risky when no folds are used in GA, because bootstrap score is only rough estimate of error.: ", + "output": "fraction std bootstrap ladder factor config.toml: Factor of standard deviation of bootstrap scores by which to accept new model in genetic algorithm. Too small a fraction will lead to accepting new models easily even if no significant improvement in score, while too large a fraction will reject too many good models. Non-zero value is a bit risky when no folds are used in GA, because bootstrap score is only rough estimate of error.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fraction_std_bootstrap_ladder_factor", + "output": "fraction std bootstrap ladder factor config.toml: Factor of standard deviation of bootstrap scores by which to accept new model in genetic algorithm. Too small a fraction will lead to accepting new models easily even if no significant improvement in score, while too large a fraction will reject too many good models. Non-zero value is a bit risky when no folds are used in GA, because bootstrap score is only rough estimate of error.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fraction_std_bootstrap_ladder_factor", + "output": "fraction std bootstrap ladder factor config.toml: Factor of standard deviation of bootstrap scores by which to accept new model in genetic algorithm. Too small a fraction will lead to accepting new models easily even if no significant improvement in score, while too large a fraction will reject too many good models. Non-zero value is a bit risky when no folds are used in GA, because bootstrap score is only rough estimate of error.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does bootstrap_ladder_samples_limit do? : bootstrap ladder samples limit config.toml: Minimum number of bootstrap samples that are required to limit accepting new model. 
If less than this, then new model is always accepted.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain bootstrap_ladder_samples_limit. : bootstrap ladder samples limit config.toml: Minimum number of bootstrap samples that are required to limit accepting new model. If less than this, then new model is always accepted.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bootstrap_ladder_samples_limit", + "output": "bootstrap ladder samples limit config.toml: Minimum number of bootstrap samples that are required to limit accepting new model. If less than this, then new model is always accepted.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bootstrap_ladder_samples_limit", + "output": "bootstrap ladder samples limit config.toml: Minimum number of bootstrap samples that are required to limit accepting new model. If less than this, then new model is always accepted.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bootstrap ladder samples limit", + "output": "bootstrap ladder samples limit config.toml: Minimum number of bootstrap samples that are required to limit accepting new model. If less than this, then new model is always accepted.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Minimum number of bootstrap samples that are required to limit accepting new model. If less than this, then new model is always accepted.: ", + "output": "bootstrap ladder samples limit config.toml: Minimum number of bootstrap samples that are required to limit accepting new model. 
If less than this, then new model is always accepted.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting bootstrap_ladder_samples_limit", + "output": "bootstrap ladder samples limit config.toml: Minimum number of bootstrap samples that are required to limit accepting new model. If less than this, then new model is always accepted.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting bootstrap_ladder_samples_limit", + "output": "bootstrap ladder samples limit config.toml: Minimum number of bootstrap samples that are required to limit accepting new model. If less than this, then new model is always accepted.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does meta_weight_allowed_for_reference do? : meta weight allowed for reference config.toml: Min. weight of meta learner for reference models during ensembling. If 1.0, then reference model must be the clear winner to be kept. Set to 0.0 to never drop reference models: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain meta_weight_allowed_for_reference. : meta weight allowed for reference config.toml: Min. weight of meta learner for reference models during ensembling. If 1.0, then reference model must be the clear winner to be kept. Set to 0.0 to never drop reference models: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "meta_weight_allowed_for_reference", + "output": "meta weight allowed for reference config.toml: Min. weight of meta learner for reference models during ensembling. If 1.0, then reference model must be the clear winner to be kept. 
Set to 0.0 to never drop reference models: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "meta_weight_allowed_for_reference", + "output": "meta weight allowed for reference config.toml: Min. weight of meta learner for reference models during ensembling. If 1.0, then reference model must be the clear winner to be kept. Set to 0.0 to never drop reference models: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "meta weight allowed for reference", + "output": "meta weight allowed for reference config.toml: Min. weight of meta learner for reference models during ensembling. If 1.0, then reference model must be the clear winner to be kept. Set to 0.0 to never drop reference models: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Min. weight of meta learner for reference models during ensembling. If 1.0, then reference model must be the clear winner to be kept. Set to 0.0 to never drop reference models: ", + "output": "meta weight allowed for reference config.toml: Min. weight of meta learner for reference models during ensembling. If 1.0, then reference model must be the clear winner to be kept. Set to 0.0 to never drop reference models: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting meta_weight_allowed_for_reference", + "output": "meta weight allowed for reference config.toml: Min. weight of meta learner for reference models during ensembling. If 1.0, then reference model must be the clear winner to be kept. Set to 0.0 to never drop reference models: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting meta_weight_allowed_for_reference", + "output": "meta weight allowed for reference config.toml: Min. 
weight of meta learner for reference models during ensembling. If 1.0, then reference model must be the clear winner to be kept. Set to 0.0 to never drop reference models: " + }, + { + "prompt_type": "plain", + "instruction": ": What does show_full_pipeline_details do? : show full pipeline details config.toml: Whether to show full pipeline details: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain show_full_pipeline_details. : show full pipeline details config.toml: Whether to show full pipeline details: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_full_pipeline_details", + "output": "show full pipeline details config.toml: Whether to show full pipeline details: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show_full_pipeline_details", + "output": "show full pipeline details config.toml: Whether to show full pipeline details: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "show full pipeline details", + "output": "show full pipeline details config.toml: Whether to show full pipeline details: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to show full pipeline details: ", + "output": "show full pipeline details config.toml: Whether to show full pipeline details: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting show_full_pipeline_details", + "output": "show full pipeline details config.toml: Whether to show full pipeline details: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting show_full_pipeline_details", + "output": "show full pipeline details config.toml: Whether to show full pipeline details: " 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does num_transformed_features_per_pipeline_show do? : num transformed features per pipeline show config.toml: Number of features to show when logging size of fitted transformers: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain num_transformed_features_per_pipeline_show. : num transformed features per pipeline show config.toml: Number of features to show when logging size of fitted transformers: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_transformed_features_per_pipeline_show", + "output": "num transformed features per pipeline show config.toml: Number of features to show when logging size of fitted transformers: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num_transformed_features_per_pipeline_show", + "output": "num transformed features per pipeline show config.toml: Number of features to show when logging size of fitted transformers: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "num transformed features per pipeline show", + "output": "num transformed features per pipeline show config.toml: Number of features to show when logging size of fitted transformers: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of features to show when logging size of fitted transformers: ", + "output": "num transformed features per pipeline show config.toml: Number of features to show when logging size of fitted transformers: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting num_transformed_features_per_pipeline_show", + "output": "num transformed features per pipeline show config.toml: Number of features to show 
when logging size of fitted transformers: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting num_transformed_features_per_pipeline_show", + "output": "num transformed features per pipeline show config.toml: Number of features to show when logging size of fitted transformers: " + }, + { + "prompt_type": "plain", + "instruction": ": What does fs_data_frac do? : fs data frac config.toml: Fraction of data to use for another data slice for FS: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain fs_data_frac. : fs data frac config.toml: Fraction of data to use for another data slice for FS: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs_data_frac", + "output": "fs data frac config.toml: Fraction of data to use for another data slice for FS: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs_data_frac", + "output": "fs data frac config.toml: Fraction of data to use for another data slice for FS: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fs data frac", + "output": "fs data frac config.toml: Fraction of data to use for another data slice for FS: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Fraction of data to use for another data slice for FS: ", + "output": "fs data frac config.toml: Fraction of data to use for another data slice for FS: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fs_data_frac", + "output": "fs data frac config.toml: Fraction of data to use for another data slice for FS: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting 
fs_data_frac", + "output": "fs data frac config.toml: Fraction of data to use for another data slice for FS: " + }, + { + "prompt_type": "plain", + "instruction": ": What does many_columns_count do? : many columns count config.toml: Number of columns beyond which reduce expensive tasks at cost of some accuracy.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain many_columns_count. : many columns count config.toml: Number of columns beyond which reduce expensive tasks at cost of some accuracy.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "many_columns_count", + "output": "many columns count config.toml: Number of columns beyond which reduce expensive tasks at cost of some accuracy.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "many_columns_count", + "output": "many columns count config.toml: Number of columns beyond which reduce expensive tasks at cost of some accuracy.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "many columns count", + "output": "many columns count config.toml: Number of columns beyond which reduce expensive tasks at cost of some accuracy.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of columns beyond which reduce expensive tasks at cost of some accuracy.: ", + "output": "many columns count config.toml: Number of columns beyond which reduce expensive tasks at cost of some accuracy.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting many_columns_count", + "output": "many columns count config.toml: Number of columns beyond which reduce expensive tasks at cost of some accuracy.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide 
a detailed explanation of the expert setting many_columns_count", + "output": "many columns count config.toml: Number of columns beyond which reduce expensive tasks at cost of some accuracy.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does columns_count_interpretable do? : columns count interpretable config.toml: Number of columns beyond which do not set default knobs to high interpretability even if bigger data.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain columns_count_interpretable. : columns count interpretable config.toml: Number of columns beyond which do not set default knobs to high interpretability even if bigger data.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "columns_count_interpretable", + "output": "columns count interpretable config.toml: Number of columns beyond which do not set default knobs to high interpretability even if bigger data.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "columns_count_interpretable", + "output": "columns count interpretable config.toml: Number of columns beyond which do not set default knobs to high interpretability even if bigger data.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "columns count interpretable", + "output": "columns count interpretable config.toml: Number of columns beyond which do not set default knobs to high interpretability even if bigger data.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of columns beyond which do not set default knobs to high interpretability even if bigger data.: ", + "output": "columns count interpretable config.toml: Number of columns beyond which do not set default knobs to high interpretability even if 
bigger data.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting columns_count_interpretable", + "output": "columns count interpretable config.toml: Number of columns beyond which do not set default knobs to high interpretability even if bigger data.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting columns_count_interpretable", + "output": "columns count interpretable config.toml: Number of columns beyond which do not set default knobs to high interpretability even if bigger data.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does round_up_indivs_for_busy_gpus do? : round up indivs for busy gpus config.toml: Whether to round-up individuals to ensure all GPUs used. Not always best if (say) have 16 GPUs, better to have multiple experiments if in multi-user environment on single node.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain round_up_indivs_for_busy_gpus. : round up indivs for busy gpus config.toml: Whether to round-up individuals to ensure all GPUs used. Not always best if (say) have 16 GPUs, better to have multiple experiments if in multi-user environment on single node.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "round_up_indivs_for_busy_gpus", + "output": "round up indivs for busy gpus config.toml: Whether to round-up individuals to ensure all GPUs used. Not always best if (say) have 16 GPUs, better to have multiple experiments if in multi-user environment on single node.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "round_up_indivs_for_busy_gpus", + "output": "round up indivs for busy gpus config.toml: Whether to round-up individuals to ensure all GPUs used. 
Not always best if (say) have 16 GPUs, better to have multiple experiments if in multi-user environment on single node.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "round up indivs for busy gpus", + "output": "round up indivs for busy gpus config.toml: Whether to round-up individuals to ensure all GPUs used. Not always best if (say) have 16 GPUs, better to have multiple experiments if in multi-user environment on single node.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to round-up individuals to ensure all GPUs used. Not always best if (say) have 16 GPUs, better to have multiple experiments if in multi-user environment on single node.: ", + "output": "round up indivs for busy gpus config.toml: Whether to round-up individuals to ensure all GPUs used. Not always best if (say) have 16 GPUs, better to have multiple experiments if in multi-user environment on single node.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting round_up_indivs_for_busy_gpus", + "output": "round up indivs for busy gpus config.toml: Whether to round-up individuals to ensure all GPUs used. Not always best if (say) have 16 GPUs, better to have multiple experiments if in multi-user environment on single node.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting round_up_indivs_for_busy_gpus", + "output": "round up indivs for busy gpus config.toml: Whether to round-up individuals to ensure all GPUs used. Not always best if (say) have 16 GPUs, better to have multiple experiments if in multi-user environment on single node.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does require_graphviz do? 
: require graphviz config.toml: Graphviz is an optional requirement for native installations (RPM/DEP/Tar-SH, outside of Docker)to convert .dot files into .png files for pipeline visualizations as part of experiment artifacts" + }, + { + "prompt_type": "plain", + "instruction": ": Explain require_graphviz. : require graphviz config.toml: Graphviz is an optional requirement for native installations (RPM/DEP/Tar-SH, outside of Docker)to convert .dot files into .png files for pipeline visualizations as part of experiment artifacts" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to require Graphviz package at startup: . : Set the require graphviz config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "require_graphviz", + "output": "require graphviz config.toml: Graphviz is an optional requirement for native installations (RPM/DEP/Tar-SH, outside of Docker)to convert .dot files into .png files for pipeline visualizations as part of experiment artifacts" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "require_graphviz", + "output": "require graphviz config.toml: Whether to require Graphviz package at startup: Graphviz is an optional requirement for native installations (RPM/DEP/Tar-SH, outside of Docker)to convert .dot files into .png files for pipeline visualizations as part of experiment artifacts" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "require graphviz", + "output": "require graphviz config.toml: Whether to require Graphviz package at startup: Graphviz is an optional requirement for native installations (RPM/DEP/Tar-SH, outside of Docker)to convert .dot files into .png files for pipeline visualizations as part of experiment artifacts" + }, + { + "prompt_type": "human_bot", + "instruction": 
"Explain the following expert setting for Driverless AI", + "input": "Whether to require Graphviz package at startup: ", + "output": "require graphviz config.toml: Whether to require Graphviz package at startup: Graphviz is an optional requirement for native installations (RPM/DEP/Tar-SH, outside of Docker)to convert .dot files into .png files for pipeline visualizations as part of experiment artifacts" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting require_graphviz", + "output": "require graphviz config.toml: Graphviz is an optional requirement for native installations (RPM/DEP/Tar-SH, outside of Docker)to convert .dot files into .png files for pipeline visualizations as part of experiment artifacts" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting require_graphviz", + "output": "require graphviz config.toml: Whether to require Graphviz package at startup: Graphviz is an optional requirement for native installations (RPM/DEP/Tar-SH, outside of Docker)to convert .dot files into .png files for pipeline visualizations as part of experiment artifacts" + }, + { + "prompt_type": "plain", + "instruction": ": What does prob_add_genes do? : prob add genes config.toml: Unnormalized probability to add genes or instances of transformers with specific attributes.If no genes can be added, other mutations(mutating models hyper parmaters, pruning genes, pruning features, etc.) are attempted. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain prob_add_genes. : prob add genes config.toml: Unnormalized probability to add genes or instances of transformers with specific attributes.If no genes can be added, other mutations(mutating models hyper parmaters, pruning genes, pruning features, etc.) are attempted. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Probability to add transformers: . 
: Set the prob add genes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_add_genes", + "output": "prob add genes config.toml: Unnormalized probability to add genes or instances of transformers with specific attributes.If no genes can be added, other mutations(mutating models hyper parmaters, pruning genes, pruning features, etc.) are attempted. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_add_genes", + "output": "prob add genes config.toml: Probability to add transformers: Unnormalized probability to add genes or instances of transformers with specific attributes.If no genes can be added, other mutations(mutating models hyper parmaters, pruning genes, pruning features, etc.) are attempted. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob add genes", + "output": "prob add genes config.toml: Probability to add transformers: Unnormalized probability to add genes or instances of transformers with specific attributes.If no genes can be added, other mutations(mutating models hyper parmaters, pruning genes, pruning features, etc.) are attempted. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Probability to add transformers: ", + "output": "prob add genes config.toml: Probability to add transformers: Unnormalized probability to add genes or instances of transformers with specific attributes.If no genes can be added, other mutations(mutating models hyper parmaters, pruning genes, pruning features, etc.) are attempted. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prob_add_genes", + "output": "prob add genes config.toml: Unnormalized probability to add genes or instances of transformers with specific attributes.If no genes can be added, other mutations(mutating models hyper parmaters, pruning genes, pruning features, etc.) are attempted. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting prob_add_genes", + "output": "prob add genes config.toml: Probability to add transformers: Unnormalized probability to add genes or instances of transformers with specific attributes.If no genes can be added, other mutations(mutating models hyper parmaters, pruning genes, pruning features, etc.) are attempted. " + }, + { + "prompt_type": "plain", + "instruction": ": What does prob_addbest_genes do? : prob addbest genes config.toml: Unnormalized probability, conditioned on prob_add_genes,to add genes or instances of transformers with specific attributesthat have shown to be beneficial to other individuals within the population. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain prob_addbest_genes. : prob addbest genes config.toml: Unnormalized probability, conditioned on prob_add_genes,to add genes or instances of transformers with specific attributesthat have shown to be beneficial to other individuals within the population. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Probability to add best shared transformers: . 
: Set the prob addbest genes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_addbest_genes", + "output": "prob addbest genes config.toml: Unnormalized probability, conditioned on prob_add_genes,to add genes or instances of transformers with specific attributesthat have shown to be beneficial to other individuals within the population. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_addbest_genes", + "output": "prob addbest genes config.toml: Probability to add best shared transformers: Unnormalized probability, conditioned on prob_add_genes,to add genes or instances of transformers with specific attributesthat have shown to be beneficial to other individuals within the population. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob addbest genes", + "output": "prob addbest genes config.toml: Probability to add best shared transformers: Unnormalized probability, conditioned on prob_add_genes,to add genes or instances of transformers with specific attributesthat have shown to be beneficial to other individuals within the population. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Probability to add best shared transformers: ", + "output": "prob addbest genes config.toml: Probability to add best shared transformers: Unnormalized probability, conditioned on prob_add_genes,to add genes or instances of transformers with specific attributesthat have shown to be beneficial to other individuals within the population. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prob_addbest_genes", + "output": "prob addbest genes config.toml: Unnormalized probability, conditioned on prob_add_genes,to add genes or instances of transformers with specific attributesthat have shown to be beneficial to other individuals within the population. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting prob_addbest_genes", + "output": "prob addbest genes config.toml: Probability to add best shared transformers: Unnormalized probability, conditioned on prob_add_genes,to add genes or instances of transformers with specific attributesthat have shown to be beneficial to other individuals within the population. " + }, + { + "prompt_type": "plain", + "instruction": ": What does prob_prune_genes do? : prob prune genes config.toml: Unnormalized probability to prune genes or instances of transformers with specific attributes.If a variety of transformers with many attributes exists, default value is reasonable.However, if one has fixed set of transformers that should not change or no new transformer attributescan be added, then setting this to 0.0 is reasonable to avoid undesired loss of transformations. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain prob_prune_genes. : prob prune genes config.toml: Unnormalized probability to prune genes or instances of transformers with specific attributes.If a variety of transformers with many attributes exists, default value is reasonable.However, if one has fixed set of transformers that should not change or no new transformer attributescan be added, then setting this to 0.0 is reasonable to avoid undesired loss of transformations. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Probability to prune transformers: . 
: Set the prob prune genes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_prune_genes", + "output": "prob prune genes config.toml: Unnormalized probability to prune genes or instances of transformers with specific attributes.If a variety of transformers with many attributes exists, default value is reasonable.However, if one has fixed set of transformers that should not change or no new transformer attributescan be added, then setting this to 0.0 is reasonable to avoid undesired loss of transformations. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_prune_genes", + "output": "prob prune genes config.toml: Probability to prune transformers: Unnormalized probability to prune genes or instances of transformers with specific attributes.If a variety of transformers with many attributes exists, default value is reasonable.However, if one has fixed set of transformers that should not change or no new transformer attributescan be added, then setting this to 0.0 is reasonable to avoid undesired loss of transformations. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob prune genes", + "output": "prob prune genes config.toml: Probability to prune transformers: Unnormalized probability to prune genes or instances of transformers with specific attributes.If a variety of transformers with many attributes exists, default value is reasonable.However, if one has fixed set of transformers that should not change or no new transformer attributescan be added, then setting this to 0.0 is reasonable to avoid undesired loss of transformations. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Probability to prune transformers: ", + "output": "prob prune genes config.toml: Probability to prune transformers: Unnormalized probability to prune genes or instances of transformers with specific attributes.If a variety of transformers with many attributes exists, default value is reasonable.However, if one has fixed set of transformers that should not change or no new transformer attributescan be added, then setting this to 0.0 is reasonable to avoid undesired loss of transformations. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prob_prune_genes", + "output": "prob prune genes config.toml: Unnormalized probability to prune genes or instances of transformers with specific attributes.If a variety of transformers with many attributes exists, default value is reasonable.However, if one has fixed set of transformers that should not change or no new transformer attributescan be added, then setting this to 0.0 is reasonable to avoid undesired loss of transformations. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting prob_prune_genes", + "output": "prob prune genes config.toml: Probability to prune transformers: Unnormalized probability to prune genes or instances of transformers with specific attributes.If a variety of transformers with many attributes exists, default value is reasonable.However, if one has fixed set of transformers that should not change or no new transformer attributescan be added, then setting this to 0.0 is reasonable to avoid undesired loss of transformations. " + }, + { + "prompt_type": "plain", + "instruction": ": What does prob_perturb_xgb do? : prob perturb xgb config.toml: Unnormalized probability change model hyper parameters. 
" + }, + { + "prompt_type": "plain", + "instruction": ": Explain prob_perturb_xgb. : prob perturb xgb config.toml: Unnormalized probability change model hyper parameters. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Probability to mutate model parameters: . : Set the prob perturb xgb config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_perturb_xgb", + "output": "prob perturb xgb config.toml: Unnormalized probability change model hyper parameters. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_perturb_xgb", + "output": "prob perturb xgb config.toml: Probability to mutate model parameters: Unnormalized probability change model hyper parameters. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob perturb xgb", + "output": "prob perturb xgb config.toml: Probability to mutate model parameters: Unnormalized probability change model hyper parameters. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Probability to mutate model parameters: ", + "output": "prob perturb xgb config.toml: Probability to mutate model parameters: Unnormalized probability change model hyper parameters. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prob_perturb_xgb", + "output": "prob perturb xgb config.toml: Unnormalized probability change model hyper parameters. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting prob_perturb_xgb", + "output": "prob perturb xgb config.toml: Probability to mutate model parameters: Unnormalized probability change model hyper parameters. 
" + }, + { + "prompt_type": "plain", + "instruction": ": What does prob_prune_by_features do? : prob prune by features config.toml: Unnormalized probability to prune features that have low variable importance, as opposed to pruning entire instances of genes/transformers when prob_prune_genes used.If prob_prune_genes=0.0 and prob_prune_by_features==0.0 and prob_prune_by_top_features==0.0, then genes/transformers and transformed features are only pruned if they are:1) inconsistent with the genome2) inconsistent with the column data types3) had no signal (for interactions and cv_in_cv for target encoding)4) transformation failedE.g. these are toml settings are then ignored:1) ngenes_max2) limit_features_by_interpretability3) varimp_threshold_at_interpretability_104) features_allowed_by_interpretability5) remove_scored_0gain_genes_in_postprocessing_above_interpretability6) nfeatures_max_threshold7) features_cost_per_interpSo this acts similar to no_drop_features, except no_drop_features also applies to shift and leak detection, constant columns are not dropped, ID columns are not dropped." + }, + { + "prompt_type": "plain", + "instruction": ": Explain prob_prune_by_features. : prob prune by features config.toml: Unnormalized probability to prune features that have low variable importance, as opposed to pruning entire instances of genes/transformers when prob_prune_genes used.If prob_prune_genes=0.0 and prob_prune_by_features==0.0 and prob_prune_by_top_features==0.0, then genes/transformers and transformed features are only pruned if they are:1) inconsistent with the genome2) inconsistent with the column data types3) had no signal (for interactions and cv_in_cv for target encoding)4) transformation failedE.g. 
these are toml settings are then ignored:1) ngenes_max2) limit_features_by_interpretability3) varimp_threshold_at_interpretability_104) features_allowed_by_interpretability5) remove_scored_0gain_genes_in_postprocessing_above_interpretability6) nfeatures_max_threshold7) features_cost_per_interpSo this acts similar to no_drop_features, except no_drop_features also applies to shift and leak detection, constant columns are not dropped, ID columns are not dropped." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Probability to prune weak features: . : Set the prob prune by features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_prune_by_features", + "output": "prob prune by features config.toml: Unnormalized probability to prune features that have low variable importance, as opposed to pruning entire instances of genes/transformers when prob_prune_genes used.If prob_prune_genes=0.0 and prob_prune_by_features==0.0 and prob_prune_by_top_features==0.0, then genes/transformers and transformed features are only pruned if they are:1) inconsistent with the genome2) inconsistent with the column data types3) had no signal (for interactions and cv_in_cv for target encoding)4) transformation failedE.g. these are toml settings are then ignored:1) ngenes_max2) limit_features_by_interpretability3) varimp_threshold_at_interpretability_104) features_allowed_by_interpretability5) remove_scored_0gain_genes_in_postprocessing_above_interpretability6) nfeatures_max_threshold7) features_cost_per_interpSo this acts similar to no_drop_features, except no_drop_features also applies to shift and leak detection, constant columns are not dropped, ID columns are not dropped." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_prune_by_features", + "output": "prob prune by features config.toml: Probability to prune weak features: Unnormalized probability to prune features that have low variable importance, as opposed to pruning entire instances of genes/transformers when prob_prune_genes used.If prob_prune_genes=0.0 and prob_prune_by_features==0.0 and prob_prune_by_top_features==0.0, then genes/transformers and transformed features are only pruned if they are:1) inconsistent with the genome2) inconsistent with the column data types3) had no signal (for interactions and cv_in_cv for target encoding)4) transformation failedE.g. these are toml settings are then ignored:1) ngenes_max2) limit_features_by_interpretability3) varimp_threshold_at_interpretability_104) features_allowed_by_interpretability5) remove_scored_0gain_genes_in_postprocessing_above_interpretability6) nfeatures_max_threshold7) features_cost_per_interpSo this acts similar to no_drop_features, except no_drop_features also applies to shift and leak detection, constant columns are not dropped, ID columns are not dropped." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob prune by features", + "output": "prob prune by features config.toml: Probability to prune weak features: Unnormalized probability to prune features that have low variable importance, as opposed to pruning entire instances of genes/transformers when prob_prune_genes used.If prob_prune_genes=0.0 and prob_prune_by_features==0.0 and prob_prune_by_top_features==0.0, then genes/transformers and transformed features are only pruned if they are:1) inconsistent with the genome2) inconsistent with the column data types3) had no signal (for interactions and cv_in_cv for target encoding)4) transformation failedE.g. 
these are toml settings are then ignored:1) ngenes_max2) limit_features_by_interpretability3) varimp_threshold_at_interpretability_104) features_allowed_by_interpretability5) remove_scored_0gain_genes_in_postprocessing_above_interpretability6) nfeatures_max_threshold7) features_cost_per_interpSo this acts similar to no_drop_features, except no_drop_features also applies to shift and leak detection, constant columns are not dropped, ID columns are not dropped." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Probability to prune weak features: ", + "output": "prob prune by features config.toml: Probability to prune weak features: Unnormalized probability to prune features that have low variable importance, as opposed to pruning entire instances of genes/transformers when prob_prune_genes used.If prob_prune_genes=0.0 and prob_prune_by_features==0.0 and prob_prune_by_top_features==0.0, then genes/transformers and transformed features are only pruned if they are:1) inconsistent with the genome2) inconsistent with the column data types3) had no signal (for interactions and cv_in_cv for target encoding)4) transformation failedE.g. these are toml settings are then ignored:1) ngenes_max2) limit_features_by_interpretability3) varimp_threshold_at_interpretability_104) features_allowed_by_interpretability5) remove_scored_0gain_genes_in_postprocessing_above_interpretability6) nfeatures_max_threshold7) features_cost_per_interpSo this acts similar to no_drop_features, except no_drop_features also applies to shift and leak detection, constant columns are not dropped, ID columns are not dropped." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prob_prune_by_features", + "output": "prob prune by features config.toml: Unnormalized probability to prune features that have low variable importance, as opposed to pruning entire instances of genes/transformers when prob_prune_genes used.If prob_prune_genes=0.0 and prob_prune_by_features==0.0 and prob_prune_by_top_features==0.0, then genes/transformers and transformed features are only pruned if they are:1) inconsistent with the genome2) inconsistent with the column data types3) had no signal (for interactions and cv_in_cv for target encoding)4) transformation failedE.g. these are toml settings are then ignored:1) ngenes_max2) limit_features_by_interpretability3) varimp_threshold_at_interpretability_104) features_allowed_by_interpretability5) remove_scored_0gain_genes_in_postprocessing_above_interpretability6) nfeatures_max_threshold7) features_cost_per_interpSo this acts similar to no_drop_features, except no_drop_features also applies to shift and leak detection, constant columns are not dropped, ID columns are not dropped." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting prob_prune_by_features", + "output": "prob prune by features config.toml: Probability to prune weak features: Unnormalized probability to prune features that have low variable importance, as opposed to pruning entire instances of genes/transformers when prob_prune_genes used.If prob_prune_genes=0.0 and prob_prune_by_features==0.0 and prob_prune_by_top_features==0.0, then genes/transformers and transformed features are only pruned if they are:1) inconsistent with the genome2) inconsistent with the column data types3) had no signal (for interactions and cv_in_cv for target encoding)4) transformation failedE.g. 
these toml settings are then ignored:1) ngenes_max2) limit_features_by_interpretability3) varimp_threshold_at_interpretability_104) features_allowed_by_interpretability5) remove_scored_0gain_genes_in_postprocessing_above_interpretability6) nfeatures_max_threshold7) features_cost_per_interpSo this acts similar to no_drop_features, except no_drop_features also applies to shift and leak detection, constant columns are not dropped, ID columns are not dropped." + }, + { + "prompt_type": "plain", + "instruction": ": What does prob_prune_by_top_features do? : prob prune by top features config.toml: Unnormalized probability to prune features that have high variable importance, in case they have high gain but negative performance on validation and would otherwise maintain poor validation scores. Similar to prob_prune_by_features but for high gain features." + }, + { + "prompt_type": "plain", + "instruction": ": Explain prob_prune_by_top_features. : prob prune by top features config.toml: Unnormalized probability to prune features that have high variable importance, in case they have high gain but negative performance on validation and would otherwise maintain poor validation scores. Similar to prob_prune_by_features but for high gain features." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Probability to prune strong features: . : Set the prob prune by top features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_prune_by_top_features", + "output": "prob prune by top features config.toml: Unnormalized probability to prune features that have high variable importance, in case they have high gain but negative performance on validation and would otherwise maintain poor validation scores. Similar to prob_prune_by_features but for high gain features." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_prune_by_top_features", + "output": "prob prune by top features config.toml: Probability to prune strong features: Unnormalized probability to prune features that have high variable importance, in case they have high gain but negaive perfomrance on validation and would otherwise maintain poor validation scores. Similar to prob_prune_by_features but for high gain features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob prune by top features", + "output": "prob prune by top features config.toml: Probability to prune strong features: Unnormalized probability to prune features that have high variable importance, in case they have high gain but negaive perfomrance on validation and would otherwise maintain poor validation scores. Similar to prob_prune_by_features but for high gain features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Probability to prune strong features: ", + "output": "prob prune by top features config.toml: Probability to prune strong features: Unnormalized probability to prune features that have high variable importance, in case they have high gain but negaive perfomrance on validation and would otherwise maintain poor validation scores. Similar to prob_prune_by_features but for high gain features." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prob_prune_by_top_features", + "output": "prob prune by top features config.toml: Unnormalized probability to prune features that have high variable importance, in case they have high gain but negaive perfomrance on validation and would otherwise maintain poor validation scores. Similar to prob_prune_by_features but for high gain features." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting prob_prune_by_top_features", + "output": "prob prune by top features config.toml: Probability to prune strong features: Unnormalized probability to prune features that have high variable importance, in case they have high gain but negaive perfomrance on validation and would otherwise maintain poor validation scores. Similar to prob_prune_by_features but for high gain features." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_num_prune_by_top_features do? : max num prune by top features config.toml: Maximum number of high gain features to prune for each mutation call, to control behavior of prob_prune_by_top_features." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_num_prune_by_top_features. : max num prune by top features config.toml: Maximum number of high gain features to prune for each mutation call, to control behavior of prob_prune_by_top_features." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of high gain features to prune each mutation: . : Set the max num prune by top features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_num_prune_by_top_features", + "output": "max num prune by top features config.toml: Maximum number of high gain features to prune for each mutation call, to control behavior of prob_prune_by_top_features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_num_prune_by_top_features", + "output": "max num prune by top features config.toml: Number of high gain features to prune each mutation: Maximum number of high gain features to prune for each mutation call, to control behavior of prob_prune_by_top_features." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max num prune by top features", + "output": "max num prune by top features config.toml: Number of high gain features to prune each mutation: Maximum number of high gain features to prune for each mutation call, to control behavior of prob_prune_by_top_features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of high gain features to prune each mutation: ", + "output": "max num prune by top features config.toml: Number of high gain features to prune each mutation: Maximum number of high gain features to prune for each mutation call, to control behavior of prob_prune_by_top_features." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_num_prune_by_top_features", + "output": "max num prune by top features config.toml: Maximum number of high gain features to prune for each mutation call, to control behavior of prob_prune_by_top_features." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_num_prune_by_top_features", + "output": "max num prune by top features config.toml: Number of high gain features to prune each mutation: Maximum number of high gain features to prune for each mutation call, to control behavior of prob_prune_by_top_features." + }, + { + "prompt_type": "plain", + "instruction": ": What does prob_prune_pretransformer_genes do? : prob prune pretransformer genes config.toml: Like prob_prune_genes but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "plain", + "instruction": ": Explain prob_prune_pretransformer_genes. : prob prune pretransformer genes config.toml: Like prob_prune_genes but only for pretransformers, i.e. 
those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Probability to prune pretransformers: . : Set the prob prune pretransformer genes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_prune_pretransformer_genes", + "output": "prob prune pretransformer genes config.toml: Like prob_prune_genes but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_prune_pretransformer_genes", + "output": "prob prune pretransformer genes config.toml: Probability to prune pretransformers: Like prob_prune_genes but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob prune pretransformer genes", + "output": "prob prune pretransformer genes config.toml: Probability to prune pretransformers: Like prob_prune_genes but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Probability to prune pretransformers: ", + "output": "prob prune pretransformer genes config.toml: Probability to prune pretransformers: Like prob_prune_genes but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prob_prune_pretransformer_genes", + "output": "prob prune pretransformer genes config.toml: Like prob_prune_genes but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting prob_prune_pretransformer_genes", + "output": "prob prune pretransformer genes config.toml: Probability to prune pretransformers: Like prob_prune_genes but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "plain", + "instruction": ": What does prob_prune_pretransformer_by_features do? : prob prune pretransformer by features config.toml: Like prob_prune_by_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "plain", + "instruction": ": Explain prob_prune_pretransformer_by_features. : prob prune pretransformer by features config.toml: Like prob_prune_by_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Probability to prune weak pretransformer features: . : Set the prob prune pretransformer by features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_prune_pretransformer_by_features", + "output": "prob prune pretransformer by features config.toml: Like prob_prune_by_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_prune_pretransformer_by_features", + "output": "prob prune pretransformer by features config.toml: Probability to prune weak pretransformer features: Like prob_prune_by_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob prune pretransformer by features", + "output": "prob prune pretransformer by features config.toml: Probability to prune weak pretransformer features: Like prob_prune_by_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Probability to prune weak pretransformer features: ", + "output": "prob prune pretransformer by features config.toml: Probability to prune weak pretransformer features: Like prob_prune_by_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prob_prune_pretransformer_by_features", + "output": "prob prune pretransformer by features config.toml: Like prob_prune_by_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting prob_prune_pretransformer_by_features", + "output": "prob prune pretransformer by features config.toml: Probability to prune weak pretransformer features: Like prob_prune_by_features but only for pretransformers, i.e. 
those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "plain", + "instruction": ": What does prob_prune_pretransformer_by_top_features do? : prob prune pretransformer by top features config.toml: Like prob_prune_by_top_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "plain", + "instruction": ": Explain prob_prune_pretransformer_by_top_features. : prob prune pretransformer by top features config.toml: Like prob_prune_by_top_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Probability to prune strong pretransformer features: . : Set the prob prune pretransformer by top features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_prune_pretransformer_by_top_features", + "output": "prob prune pretransformer by top features config.toml: Like prob_prune_by_top_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_prune_pretransformer_by_top_features", + "output": "prob prune pretransformer by top features config.toml: Probability to prune strong pretransformer features: Like prob_prune_by_top_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob prune pretransformer by top features", + "output": "prob prune pretransformer by top features config.toml: Probability to prune strong pretransformer features: Like prob_prune_by_top_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Probability to prune strong pretransformer features: ", + "output": "prob prune pretransformer by top features config.toml: Probability to prune strong pretransformer features: Like prob_prune_by_top_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prob_prune_pretransformer_by_top_features", + "output": "prob prune pretransformer by top features config.toml: Like prob_prune_by_top_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting prob_prune_pretransformer_by_top_features", + "output": "prob prune pretransformer by top features config.toml: Probability to prune strong pretransformer features: Like prob_prune_by_top_features but only for pretransformers, i.e. those transformers in layers except last layer that connects to model." + }, + { + "prompt_type": "plain", + "instruction": ": What does override_individual_from_toml_list do? : override individual from toml list config.toml: When doing restart, retrain, refit, reset these individual parameters to new toml values." + }, + { + "prompt_type": "plain", + "instruction": ": Explain override_individual_from_toml_list. 
: override individual from toml list config.toml: When doing restart, retrain, refit, reset these individual parameters to new toml values." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: config.toml items stored in individual to overwrite: . : Set the override individual from toml list config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override_individual_from_toml_list", + "output": "override individual from toml list config.toml: When doing restart, retrain, refit, reset these individual parameters to new toml values." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override_individual_from_toml_list", + "output": "override individual from toml list config.toml: config.toml items stored in individual to overwrite: When doing restart, retrain, refit, reset these individual parameters to new toml values." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override individual from toml list", + "output": "override individual from toml list config.toml: config.toml items stored in individual to overwrite: When doing restart, retrain, refit, reset these individual parameters to new toml values." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "config.toml items stored in individual to overwrite: ", + "output": "override individual from toml list config.toml: config.toml items stored in individual to overwrite: When doing restart, retrain, refit, reset these individual parameters to new toml values." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting override_individual_from_toml_list", + "output": "override individual from toml list config.toml: When doing restart, retrain, refit, reset these individual parameters to new toml values." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting override_individual_from_toml_list", + "output": "override individual from toml list config.toml: config.toml items stored in individual to overwrite: When doing restart, retrain, refit, reset these individual parameters to new toml values." + }, + { + "prompt_type": "plain", + "instruction": ": What does fast_approx_max_num_trees_ever do? : fast approx max num trees ever config.toml: Max. number of trees to use for all tree model predictions. For testing, when predictions don't matter. -1 means disabled." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fast_approx_max_num_trees_ever. : fast approx max num trees ever config.toml: Max. number of trees to use for all tree model predictions. For testing, when predictions don't matter. -1 means disabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_approx_max_num_trees_ever", + "output": "fast approx max num trees ever config.toml: Max. number of trees to use for all tree model predictions. For testing, when predictions don't matter. -1 means disabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_approx_max_num_trees_ever", + "output": "fast approx max num trees ever config.toml: Max. number of trees to use for all tree model predictions. For testing, when predictions don't matter. -1 means disabled." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast approx max num trees ever", + "output": "fast approx max num trees ever config.toml: Max. number of trees to use for all tree model predictions. For testing, when predictions don't matter. -1 means disabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "fast approx max num trees ever config.toml: Max. number of trees to use for all tree model predictions. For testing, when predictions don't matter. -1 means disabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fast_approx_max_num_trees_ever", + "output": "fast approx max num trees ever config.toml: Max. number of trees to use for all tree model predictions. For testing, when predictions don't matter. -1 means disabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fast_approx_max_num_trees_ever", + "output": "fast approx max num trees ever config.toml: Max. number of trees to use for all tree model predictions. For testing, when predictions don't matter. -1 means disabled." + }, + { + "prompt_type": "plain", + "instruction": ": What does fast_approx_num_trees do? : fast approx num trees config.toml: Max. number of trees to use for fast_approx=True (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fast_approx_num_trees. : fast approx num trees config.toml: Max. number of trees to use for fast_approx=True (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_approx_num_trees", + "output": "fast approx num trees config.toml: Max. number of trees to use for fast_approx=True (e.g., for AutoDoc/MLI)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_approx_num_trees", + "output": "fast approx num trees config.toml: Max. number of trees to use for fast_approx=True (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast approx num trees", + "output": "fast approx num trees config.toml: Max. number of trees to use for fast_approx=True (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "fast approx num trees config.toml: Max. number of trees to use for fast_approx=True (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fast_approx_num_trees", + "output": "fast approx num trees config.toml: Max. number of trees to use for fast_approx=True (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fast_approx_num_trees", + "output": "fast approx num trees config.toml: Max. number of trees to use for fast_approx=True (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "plain", + "instruction": ": What does fast_approx_do_one_fold do? : fast approx do one fold config.toml: Whether to speed up fast_approx=True further, by using only one fold out of all cross-validation folds (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fast_approx_do_one_fold. : fast approx do one fold config.toml: Whether to speed up fast_approx=True further, by using only one fold out of all cross-validation folds (e.g., for AutoDoc/MLI)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_approx_do_one_fold", + "output": "fast approx do one fold config.toml: Whether to speed up fast_approx=True further, by using only one fold out of all cross-validation folds (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_approx_do_one_fold", + "output": "fast approx do one fold config.toml: Whether to speed up fast_approx=True further, by using only one fold out of all cross-validation folds (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast approx do one fold", + "output": "fast approx do one fold config.toml: Whether to speed up fast_approx=True further, by using only one fold out of all cross-validation folds (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "fast approx do one fold config.toml: Whether to speed up fast_approx=True further, by using only one fold out of all cross-validation folds (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fast_approx_do_one_fold", + "output": "fast approx do one fold config.toml: Whether to speed up fast_approx=True further, by using only one fold out of all cross-validation folds (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fast_approx_do_one_fold", + "output": "fast approx do one fold config.toml: Whether to speed up fast_approx=True further, by using only one fold out of all cross-validation folds (e.g., for AutoDoc/MLI)." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does fast_approx_do_one_model do? : fast approx do one model config.toml: Whether to speed up fast_approx=True further, by using only one model out of all ensemble models (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fast_approx_do_one_model. : fast approx do one model config.toml: Whether to speed up fast_approx=True further, by using only one model out of all ensemble models (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_approx_do_one_model", + "output": "fast approx do one model config.toml: Whether to speed up fast_approx=True further, by using only one model out of all ensemble models (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_approx_do_one_model", + "output": "fast approx do one model config.toml: Whether to speed up fast_approx=True further, by using only one model out of all ensemble models (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast approx do one model", + "output": "fast approx do one model config.toml: Whether to speed up fast_approx=True further, by using only one model out of all ensemble models (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "fast approx do one model config.toml: Whether to speed up fast_approx=True further, by using only one model out of all ensemble models (e.g., for AutoDoc/MLI)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fast_approx_do_one_model", + "output": "fast approx do one model config.toml: Whether to speed up fast_approx=True further, by using only one model out of all ensemble models (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fast_approx_do_one_model", + "output": "fast approx do one model config.toml: Whether to speed up fast_approx=True further, by using only one model out of all ensemble models (e.g., for AutoDoc/MLI)." + }, + { + "prompt_type": "plain", + "instruction": ": What does fast_approx_contribs_num_trees do? : fast approx contribs num trees config.toml: Max. number of trees to use for fast_approx_contribs=True (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fast_approx_contribs_num_trees. : fast approx contribs num trees config.toml: Max. number of trees to use for fast_approx_contribs=True (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_approx_contribs_num_trees", + "output": "fast approx contribs num trees config.toml: Max. number of trees to use for fast_approx_contribs=True (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_approx_contribs_num_trees", + "output": "fast approx contribs num trees config.toml: Max. number of trees to use for fast_approx_contribs=True (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast approx contribs num trees", + "output": "fast approx contribs num trees config.toml: Max. number of trees to use for fast_approx_contribs=True (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "fast approx contribs num trees config.toml: Max. number of trees to use for fast_approx_contribs=True (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fast_approx_contribs_num_trees", + "output": "fast approx contribs num trees config.toml: Max. number of trees to use for fast_approx_contribs=True (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fast_approx_contribs_num_trees", + "output": "fast approx contribs num trees config.toml: Max. number of trees to use for fast_approx_contribs=True (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "plain", + "instruction": ": What does fast_approx_contribs_do_one_fold do? : fast approx contribs do one fold config.toml: Whether to speed up fast_approx_contribs=True further, by using only one fold out of all cross-validation folds (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fast_approx_contribs_do_one_fold. 
: fast approx contribs do one fold config.toml: Whether to speed up fast_approx_contribs=True further, by using only one fold out of all cross-validation folds (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_approx_contribs_do_one_fold", + "output": "fast approx contribs do one fold config.toml: Whether to speed up fast_approx_contribs=True further, by using only one fold out of all cross-validation folds (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_approx_contribs_do_one_fold", + "output": "fast approx contribs do one fold config.toml: Whether to speed up fast_approx_contribs=True further, by using only one fold out of all cross-validation folds (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast approx contribs do one fold", + "output": "fast approx contribs do one fold config.toml: Whether to speed up fast_approx_contribs=True further, by using only one fold out of all cross-validation folds (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "fast approx contribs do one fold config.toml: Whether to speed up fast_approx_contribs=True further, by using only one fold out of all cross-validation folds (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fast_approx_contribs_do_one_fold", + "output": "fast approx contribs do one fold config.toml: Whether to speed up fast_approx_contribs=True further, by using only one fold out of all cross-validation folds (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fast_approx_contribs_do_one_fold", + "output": "fast approx contribs do one fold config.toml: Whether to speed up fast_approx_contribs=True further, by using only one fold out of all cross-validation folds (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "plain", + "instruction": ": What does fast_approx_contribs_do_one_model do? : fast approx contribs do one model config.toml: Whether to speed up fast_approx_contribs=True further, by using only one model out of all ensemble models (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fast_approx_contribs_do_one_model. : fast approx contribs do one model config.toml: Whether to speed up fast_approx_contribs=True further, by using only one model out of all ensemble models (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_approx_contribs_do_one_model", + "output": "fast approx contribs do one model config.toml: Whether to speed up fast_approx_contribs=True further, by using only one model out of all ensemble models (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_approx_contribs_do_one_model", + "output": "fast approx contribs do one model config.toml: Whether to speed up fast_approx_contribs=True further, by using only one model out of all ensemble models (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast approx contribs do one model", + "output": "fast approx contribs do one model config.toml: Whether to speed up fast_approx_contribs=True further, by using only one model out of all ensemble models (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "fast approx contribs do one model config.toml: Whether to speed up fast_approx_contribs=True further, by using only one model out of all ensemble models (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fast_approx_contribs_do_one_model", + "output": "fast approx contribs do one model config.toml: Whether to speed up fast_approx_contribs=True further, by using only one model out of all ensemble models (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fast_approx_contribs_do_one_model", + "output": "fast approx contribs do one model config.toml: Whether to speed up fast_approx_contribs=True further, by using only one model out of all ensemble models (e.g., for 'Fast Approximation' in GUI when making Shapley predictions, and for AutoDoc/MLI)." + }, + { + "prompt_type": "plain", + "instruction": ": What does use_187_prob_logic do? : use 187 prob logic config.toml: Whether to use exploit-explore logic like DAI 1.8.x. False will explore more." + }, + { + "prompt_type": "plain", + "instruction": ": Explain use_187_prob_logic. : use 187 prob logic config.toml: Whether to use exploit-explore logic like DAI 1.8.x. False will explore more." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_187_prob_logic", + "output": "use 187 prob logic config.toml: Whether to use exploit-explore logic like DAI 1.8.x. False will explore more." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_187_prob_logic", + "output": "use 187 prob logic config.toml: Whether to use exploit-explore logic like DAI 1.8.x. False will explore more." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use 187 prob logic", + "output": "use 187 prob logic config.toml: Whether to use exploit-explore logic like DAI 1.8.x. False will explore more." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "use 187 prob logic config.toml: Whether to use exploit-explore logic like DAI 1.8.x. False will explore more." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting use_187_prob_logic", + "output": "use 187 prob logic config.toml: Whether to use exploit-explore logic like DAI 1.8.x. False will explore more." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting use_187_prob_logic", + "output": "use 187 prob logic config.toml: Whether to use exploit-explore logic like DAI 1.8.x. False will explore more." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_ohe_linear do? : enable ohe linear config.toml: Whether to enable cross-validated OneHotEncoding+LinearModel transformer" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_ohe_linear. : enable ohe linear config.toml: Whether to enable cross-validated OneHotEncoding+LinearModel transformer" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_ohe_linear", + "output": "enable ohe linear config.toml: Whether to enable cross-validated OneHotEncoding+LinearModel transformer" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_ohe_linear", + "output": "enable ohe linear config.toml: Whether to enable cross-validated OneHotEncoding+LinearModel transformer" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable ohe linear", + "output": "enable ohe linear config.toml: Whether to enable cross-validated OneHotEncoding+LinearModel transformer" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable ohe linear config.toml: Whether to enable cross-validated OneHotEncoding+LinearModel transformer" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a 
short explanation of the expert setting enable_ohe_linear", + "output": "enable ohe linear config.toml: Whether to enable cross-validated OneHotEncoding+LinearModel transformer" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_ohe_linear", + "output": "enable ohe linear config.toml: Whether to enable cross-validated OneHotEncoding+LinearModel transformer" + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_added_num_classes_switch do? : tensorflow added num classes switch config.toml: Number of classes above which to include TensorFlow (if TensorFlow is enabled), even if not used exclusively. For small data this is decreased by tensorflow_num_classes_small_data_factor, and for bigger data, this is increased by tensorflow_num_classes_big_data_reduction_factor." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_added_num_classes_switch. : tensorflow added num classes switch config.toml: Number of classes above which to include TensorFlow (if TensorFlow is enabled), even if not used exclusively. For small data this is decreased by tensorflow_num_classes_small_data_factor, and for bigger data, this is increased by tensorflow_num_classes_big_data_reduction_factor." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Num. classes above which include Tensorflow: . : Set the tensorflow added num classes switch config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_added_num_classes_switch", + "output": "tensorflow added num classes switch config.toml: Number of classes above which to include TensorFlow (if TensorFlow is enabled), even if not used exclusively. For small data this is decreased by tensorflow_num_classes_small_data_factor, and for bigger data, this is increased by tensorflow_num_classes_big_data_reduction_factor." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_added_num_classes_switch", + "output": "tensorflow added num classes switch config.toml: Num. classes above which include Tensorflow: Number of classes above which to include TensorFlow (if TensorFlow is enabled), even if not used exclusively. For small data this is decreased by tensorflow_num_classes_small_data_factor, and for bigger data, this is increased by tensorflow_num_classes_big_data_reduction_factor." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow added num classes switch", + "output": "tensorflow added num classes switch config.toml: Num. classes above which include Tensorflow: Number of classes above which to include TensorFlow (if TensorFlow is enabled), even if not used exclusively. For small data this is decreased by tensorflow_num_classes_small_data_factor, and for bigger data, this is increased by tensorflow_num_classes_big_data_reduction_factor." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Num. classes above which include Tensorflow: ", + "output": "tensorflow added num classes switch config.toml: Num. classes above which include Tensorflow: Number of classes above which to include TensorFlow (if TensorFlow is enabled), even if not used exclusively. For small data this is decreased by tensorflow_num_classes_small_data_factor, and for bigger data, this is increased by tensorflow_num_classes_big_data_reduction_factor." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_added_num_classes_switch", + "output": "tensorflow added num classes switch config.toml: Number of classes above which to include TensorFlow (if TensorFlow is enabled), even if not used exclusively. 
For small data this is decreased by tensorflow_num_classes_small_data_factor, and for bigger data, this is increased by tensorflow_num_classes_big_data_reduction_factor." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_added_num_classes_switch", + "output": "tensorflow added num classes switch config.toml: Num. classes above which include Tensorflow: Number of classes above which to include TensorFlow (if TensorFlow is enabled), even if not used exclusively. For small data this is decreased by tensorflow_num_classes_small_data_factor, and for bigger data, this is increased by tensorflow_num_classes_big_data_reduction_factor." + }, + { + "prompt_type": "plain", + "instruction": ": What does tensorflow_num_classes_switch do? : tensorflow num classes switch config.toml: Number of classes above which to only use TensorFlow (if TensorFlow is enabled), instead of others models set on 'auto' (models set to 'on' are still used). Up to tensorflow_num_classes_switch_but_keep_lightgbm, keep LightGBM. If small data, this is increased by tensorflow_num_classes_small_data_factor." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tensorflow_num_classes_switch. : tensorflow num classes switch config.toml: Number of classes above which to only use TensorFlow (if TensorFlow is enabled), instead of others models set on 'auto' (models set to 'on' are still used). Up to tensorflow_num_classes_switch_but_keep_lightgbm, keep LightGBM. If small data, this is increased by tensorflow_num_classes_small_data_factor." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Num. classes above which to exclusively use TensorFlow: . 
: Set the tensorflow num classes switch config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_num_classes_switch", + "output": "tensorflow num classes switch config.toml: Number of classes above which to only use TensorFlow (if TensorFlow is enabled), instead of others models set on 'auto' (models set to 'on' are still used). Up to tensorflow_num_classes_switch_but_keep_lightgbm, keep LightGBM. If small data, this is increased by tensorflow_num_classes_small_data_factor." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow_num_classes_switch", + "output": "tensorflow num classes switch config.toml: Num. classes above which to exclusively use TensorFlow: Number of classes above which to only use TensorFlow (if TensorFlow is enabled), instead of others models set on 'auto' (models set to 'on' are still used). Up to tensorflow_num_classes_switch_but_keep_lightgbm, keep LightGBM. If small data, this is increased by tensorflow_num_classes_small_data_factor." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tensorflow num classes switch", + "output": "tensorflow num classes switch config.toml: Num. classes above which to exclusively use TensorFlow: Number of classes above which to only use TensorFlow (if TensorFlow is enabled), instead of others models set on 'auto' (models set to 'on' are still used). Up to tensorflow_num_classes_switch_but_keep_lightgbm, keep LightGBM. If small data, this is increased by tensorflow_num_classes_small_data_factor." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Num. classes above which to exclusively use TensorFlow: ", + "output": "tensorflow num classes switch config.toml: Num. 
classes above which to exclusively use TensorFlow: Number of classes above which to only use TensorFlow (if TensorFlow is enabled), instead of others models set on 'auto' (models set to 'on' are still used). Up to tensorflow_num_classes_switch_but_keep_lightgbm, keep LightGBM. If small data, this is increased by tensorflow_num_classes_small_data_factor." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tensorflow_num_classes_switch", + "output": "tensorflow num classes switch config.toml: Number of classes above which to only use TensorFlow (if TensorFlow is enabled), instead of others models set on 'auto' (models set to 'on' are still used). Up to tensorflow_num_classes_switch_but_keep_lightgbm, keep LightGBM. If small data, this is increased by tensorflow_num_classes_small_data_factor." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tensorflow_num_classes_switch", + "output": "tensorflow num classes switch config.toml: Num. classes above which to exclusively use TensorFlow: Number of classes above which to only use TensorFlow (if TensorFlow is enabled), instead of others models set on 'auto' (models set to 'on' are still used). Up to tensorflow_num_classes_switch_but_keep_lightgbm, keep LightGBM. If small data, this is increased by tensorflow_num_classes_small_data_factor." + }, + { + "prompt_type": "plain", + "instruction": ": What does prediction_intervals do? : prediction intervals config.toml: Compute empirical prediction intervals (based on holdout predictions)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain prediction_intervals. : prediction intervals config.toml: Compute empirical prediction intervals (based on holdout predictions)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Compute prediction intervals: . 
: Set the prediction intervals config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prediction_intervals", + "output": "prediction intervals config.toml: Compute empirical prediction intervals (based on holdout predictions)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prediction_intervals", + "output": "prediction intervals config.toml: Compute prediction intervals: Compute empirical prediction intervals (based on holdout predictions)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prediction intervals", + "output": "prediction intervals config.toml: Compute prediction intervals: Compute empirical prediction intervals (based on holdout predictions)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Compute prediction intervals: ", + "output": "prediction intervals config.toml: Compute prediction intervals: Compute empirical prediction intervals (based on holdout predictions)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prediction_intervals", + "output": "prediction intervals config.toml: Compute empirical prediction intervals (based on holdout predictions)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting prediction_intervals", + "output": "prediction intervals config.toml: Compute prediction intervals: Compute empirical prediction intervals (based on holdout predictions)." + }, + { + "prompt_type": "plain", + "instruction": ": What does prediction_intervals_alpha do? : prediction intervals alpha config.toml: Confidence level for prediction intervals." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain prediction_intervals_alpha. : prediction intervals alpha config.toml: Confidence level for prediction intervals." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Confidence level for prediction intervals: . : Set the prediction intervals alpha config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prediction_intervals_alpha", + "output": "prediction intervals alpha config.toml: Confidence level for prediction intervals." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prediction_intervals_alpha", + "output": "prediction intervals alpha config.toml: Confidence level for prediction intervals: Confidence level for prediction intervals." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prediction intervals alpha", + "output": "prediction intervals alpha config.toml: Confidence level for prediction intervals: Confidence level for prediction intervals." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Confidence level for prediction intervals: ", + "output": "prediction intervals alpha config.toml: Confidence level for prediction intervals: Confidence level for prediction intervals." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prediction_intervals_alpha", + "output": "prediction intervals alpha config.toml: Confidence level for prediction intervals." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting prediction_intervals_alpha", + "output": "prediction intervals alpha config.toml: Confidence level for prediction intervals: Confidence level for prediction intervals." + }, + { + "prompt_type": "plain", + "instruction": ": What does pred_labels do? : pred labels config.toml: Appends one extra output column with predicted target class (after the per-class probabilities). Uses argmax for multiclass, and the threshold defined by the optimal scorer controlled by the 'threshold_scorer' expert setting for binary problems. This setting controls the training, validation and test set predictions (if applicable) that are created by the experiment. MOJO, scoring pipeline and client APIs control this behavior via their own version of this parameter." + }, + { + "prompt_type": "plain", + "instruction": ": Explain pred_labels. : pred labels config.toml: Appends one extra output column with predicted target class (after the per-class probabilities). Uses argmax for multiclass, and the threshold defined by the optimal scorer controlled by the 'threshold_scorer' expert setting for binary problems. This setting controls the training, validation and test set predictions (if applicable) that are created by the experiment. MOJO, scoring pipeline and client APIs control this behavior via their own version of this parameter." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Output labels for predictions created during the experiment for classification problems.: . : Set the pred labels config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pred_labels", + "output": "pred labels config.toml: Appends one extra output column with predicted target class (after the per-class probabilities). 
Uses argmax for multiclass, and the threshold defined by the optimal scorer controlled by the 'threshold_scorer' expert setting for binary problems. This setting controls the training, validation and test set predictions (if applicable) that are created by the experiment. MOJO, scoring pipeline and client APIs control this behavior via their own version of this parameter." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pred_labels", + "output": "pred labels config.toml: Output labels for predictions created during the experiment for classification problems.: Appends one extra output column with predicted target class (after the per-class probabilities). Uses argmax for multiclass, and the threshold defined by the optimal scorer controlled by the 'threshold_scorer' expert setting for binary problems. This setting controls the training, validation and test set predictions (if applicable) that are created by the experiment. MOJO, scoring pipeline and client APIs control this behavior via their own version of this parameter." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pred labels", + "output": "pred labels config.toml: Output labels for predictions created during the experiment for classification problems.: Appends one extra output column with predicted target class (after the per-class probabilities). Uses argmax for multiclass, and the threshold defined by the optimal scorer controlled by the 'threshold_scorer' expert setting for binary problems. This setting controls the training, validation and test set predictions (if applicable) that are created by the experiment. MOJO, scoring pipeline and client APIs control this behavior via their own version of this parameter." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Output labels for predictions created during the experiment for classification problems.: ", + "output": "pred labels config.toml: Output labels for predictions created during the experiment for classification problems.: Appends one extra output column with predicted target class (after the per-class probabilities). Uses argmax for multiclass, and the threshold defined by the optimal scorer controlled by the 'threshold_scorer' expert setting for binary problems. This setting controls the training, validation and test set predictions (if applicable) that are created by the experiment. MOJO, scoring pipeline and client APIs control this behavior via their own version of this parameter." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pred_labels", + "output": "pred labels config.toml: Appends one extra output column with predicted target class (after the per-class probabilities). Uses argmax for multiclass, and the threshold defined by the optimal scorer controlled by the 'threshold_scorer' expert setting for binary problems. This setting controls the training, validation and test set predictions (if applicable) that are created by the experiment. MOJO, scoring pipeline and client APIs control this behavior via their own version of this parameter." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pred_labels", + "output": "pred labels config.toml: Output labels for predictions created during the experiment for classification problems.: Appends one extra output column with predicted target class (after the per-class probabilities). Uses argmax for multiclass, and the threshold defined by the optimal scorer controlled by the 'threshold_scorer' expert setting for binary problems. 
This setting controls the training, validation and test set predictions (if applicable) that are created by the experiment. MOJO, scoring pipeline and client APIs control this behavior via their own version of this parameter." + }, + { + "prompt_type": "plain", + "instruction": ": What does textlin_num_classes_switch do? : textlin num classes switch config.toml: Class count above which do not use TextLin Transformer." + }, + { + "prompt_type": "plain", + "instruction": ": Explain textlin_num_classes_switch. : textlin num classes switch config.toml: Class count above which do not use TextLin Transformer." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Class count above which do not use TextLin Transformer: . : Set the textlin num classes switch config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "textlin_num_classes_switch", + "output": "textlin num classes switch config.toml: Class count above which do not use TextLin Transformer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "textlin_num_classes_switch", + "output": "textlin num classes switch config.toml: Class count above which do not use TextLin Transformer: Class count above which do not use TextLin Transformer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "textlin num classes switch", + "output": "textlin num classes switch config.toml: Class count above which do not use TextLin Transformer: Class count above which do not use TextLin Transformer." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Class count above which do not use TextLin Transformer: ", + "output": "textlin num classes switch config.toml: Class count above which do not use TextLin Transformer: Class count above which do not use TextLin Transformer." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting textlin_num_classes_switch", + "output": "textlin num classes switch config.toml: Class count above which do not use TextLin Transformer." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting textlin_num_classes_switch", + "output": "textlin num classes switch config.toml: Class count above which do not use TextLin Transformer: Class count above which do not use TextLin Transformer." + }, + { + "prompt_type": "plain", + "instruction": ": What does text_transformers_max_vocabulary_size do? : text transformers max vocabulary size config.toml: Max size (in tokens) of the vocabulary created during fitting of Tfidf/Count based text transformers (not CNN/BERT). If multiple values are provided, will use the first one for initial models, and use remaining values during parameter tuning and feature evolution. Values smaller than 10000 are recommended for speed, and a reasonable set of choices include: 100, 1000, 5000, 10000, 50000, 100000, 500000." + }, + { + "prompt_type": "plain", + "instruction": ": Explain text_transformers_max_vocabulary_size. : text transformers max vocabulary size config.toml: Max size (in tokens) of the vocabulary created during fitting of Tfidf/Count based text transformers (not CNN/BERT). If multiple values are provided, will use the first one for initial models, and use remaining values during parameter tuning and feature evolution. 
Values smaller than 10000 are recommended for speed, and a reasonable set of choices include: 100, 1000, 5000, 10000, 50000, 100000, 500000." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max size of the vocabulary for text transformers.: . : Set the text transformers max vocabulary size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text_transformers_max_vocabulary_size", + "output": "text transformers max vocabulary size config.toml: Max size (in tokens) of the vocabulary created during fitting of Tfidf/Count based text transformers (not CNN/BERT). If multiple values are provided, will use the first one for initial models, and use remaining values during parameter tuning and feature evolution. Values smaller than 10000 are recommended for speed, and a reasonable set of choices include: 100, 1000, 5000, 10000, 50000, 100000, 500000." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text_transformers_max_vocabulary_size", + "output": "text transformers max vocabulary size config.toml: Max size of the vocabulary for text transformers.: Max size (in tokens) of the vocabulary created during fitting of Tfidf/Count based text transformers (not CNN/BERT). If multiple values are provided, will use the first one for initial models, and use remaining values during parameter tuning and feature evolution. Values smaller than 10000 are recommended for speed, and a reasonable set of choices include: 100, 1000, 5000, 10000, 50000, 100000, 500000." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "text transformers max vocabulary size", + "output": "text transformers max vocabulary size config.toml: Max size of the vocabulary for text transformers.: Max size (in tokens) of the vocabulary created during fitting of Tfidf/Count based text transformers (not CNN/BERT). If multiple values are provided, will use the first one for initial models, and use remaining values during parameter tuning and feature evolution. Values smaller than 10000 are recommended for speed, and a reasonable set of choices include: 100, 1000, 5000, 10000, 50000, 100000, 500000." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max size of the vocabulary for text transformers.: ", + "output": "text transformers max vocabulary size config.toml: Max size of the vocabulary for text transformers.: Max size (in tokens) of the vocabulary created during fitting of Tfidf/Count based text transformers (not CNN/BERT). If multiple values are provided, will use the first one for initial models, and use remaining values during parameter tuning and feature evolution. Values smaller than 10000 are recommended for speed, and a reasonable set of choices include: 100, 1000, 5000, 10000, 50000, 100000, 500000." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting text_transformers_max_vocabulary_size", + "output": "text transformers max vocabulary size config.toml: Max size (in tokens) of the vocabulary created during fitting of Tfidf/Count based text transformers (not CNN/BERT). If multiple values are provided, will use the first one for initial models, and use remaining values during parameter tuning and feature evolution. 
Values smaller than 10000 are recommended for speed, and a reasonable set of choices include: 100, 1000, 5000, 10000, 50000, 100000, 500000." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting text_transformers_max_vocabulary_size", + "output": "text transformers max vocabulary size config.toml: Max size of the vocabulary for text transformers.: Max size (in tokens) of the vocabulary created during fitting of Tfidf/Count based text transformers (not CNN/BERT). If multiple values are provided, will use the first one for initial models, and use remaining values during parameter tuning and feature evolution. Values smaller than 10000 are recommended for speed, and a reasonable set of choices include: 100, 1000, 5000, 10000, 50000, 100000, 500000." + }, + { + "prompt_type": "plain", + "instruction": ": What does number_of_texts_to_cache_in_bert_transformer do? : number of texts to cache in bert transformer config.toml: Enables caching of BERT embeddings by temporarily saving the embedding vectors to the experiment directory. Set to -1 to cache all text, set to 0 to disable caching. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain number_of_texts_to_cache_in_bert_transformer. : number of texts to cache in bert transformer config.toml: Enables caching of BERT embeddings by temporarily saving the embedding vectors to the experiment directory. Set to -1 to cache all text, set to 0 to disable caching. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "number_of_texts_to_cache_in_bert_transformer", + "output": "number of texts to cache in bert transformer config.toml: Enables caching of BERT embeddings by temporarily saving the embedding vectors to the experiment directory. Set to -1 to cache all text, set to 0 to disable caching. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "number_of_texts_to_cache_in_bert_transformer", + "output": "number of texts to cache in bert transformer config.toml: Enables caching of BERT embeddings by temporarily saving the embedding vectors to the experiment directory. Set to -1 to cache all text, set to 0 to disable caching. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "number of texts to cache in bert transformer", + "output": "number of texts to cache in bert transformer config.toml: Enables caching of BERT embeddings by temporarily saving the embedding vectors to the experiment directory. Set to -1 to cache all text, set to 0 to disable caching. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "number of texts to cache in bert transformer config.toml: Enables caching of BERT embeddings by temporarily saving the embedding vectors to the experiment directory. Set to -1 to cache all text, set to 0 to disable caching. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting number_of_texts_to_cache_in_bert_transformer", + "output": "number of texts to cache in bert transformer config.toml: Enables caching of BERT embeddings by temporarily saving the embedding vectors to the experiment directory. Set to -1 to cache all text, set to 0 to disable caching. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting number_of_texts_to_cache_in_bert_transformer", + "output": "number of texts to cache in bert transformer config.toml: Enables caching of BERT embeddings by temporarily saving the embedding vectors to the experiment directory. Set to -1 to cache all text, set to 0 to disable caching. 
" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_abs_score_delta_train_valid do? : max abs score delta train valid config.toml: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this absolute value (i.e., stop adding trees once abs(train_score - valid_score) > max_abs_score_delta_train_valid). Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_abs_score_delta_train_valid. : max abs score delta train valid config.toml: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this absolute value (i.e., stop adding trees once abs(train_score - valid_score) > max_abs_score_delta_train_valid). Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. absolute delta between training and validation scores for tree models.: . 
: Set the max abs score delta train valid config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_abs_score_delta_train_valid", + "output": "max abs score delta train valid config.toml: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this absolute value (i.e., stop adding trees once abs(train_score - valid_score) > max_abs_score_delta_train_valid). Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_abs_score_delta_train_valid", + "output": "max abs score delta train valid config.toml: Max. absolute delta between training and validation scores for tree models.: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this absolute value (i.e., stop adding trees once abs(train_score - valid_score) > max_abs_score_delta_train_valid). Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max abs score delta train valid", + "output": "max abs score delta train valid config.toml: Max. 
absolute delta between training and validation scores for tree models.: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this absolute value (i.e., stop adding trees once abs(train_score - valid_score) > max_abs_score_delta_train_valid). Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. absolute delta between training and validation scores for tree models.: ", + "output": "max abs score delta train valid config.toml: Max. absolute delta between training and validation scores for tree models.: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this absolute value (i.e., stop adding trees once abs(train_score - valid_score) > max_abs_score_delta_train_valid). Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_abs_score_delta_train_valid", + "output": "max abs score delta train valid config.toml: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this absolute value (i.e., stop adding trees once abs(train_score - valid_score) > max_abs_score_delta_train_valid). 
Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_abs_score_delta_train_valid", + "output": "max abs score delta train valid config.toml: Max. absolute delta between training and validation scores for tree models.: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this absolute value (i.e., stop adding trees once abs(train_score - valid_score) > max_abs_score_delta_train_valid). Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_rel_score_delta_train_valid do? : max rel score delta train valid config.toml: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this relative value (i.e., stop adding trees once abs(train_score - valid_score) > max_rel_score_delta_train_valid * abs(train_score)). Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_rel_score_delta_train_valid. 
: max rel score delta train valid config.toml: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this relative value (i.e., stop adding trees once abs(train_score - valid_score) > max_rel_score_delta_train_valid * abs(train_score)). Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. relative delta between training and validation scores for tree models.: . : Set the max rel score delta train valid config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rel_score_delta_train_valid", + "output": "max rel score delta train valid config.toml: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this relative value (i.e., stop adding trees once abs(train_score - valid_score) > max_rel_score_delta_train_valid * abs(train_score)). Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rel_score_delta_train_valid", + "output": "max rel score delta train valid config.toml: Max. 
relative delta between training and validation scores for tree models.: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this relative value (i.e., stop adding trees once abs(train_score - valid_score) > max_rel_score_delta_train_valid * abs(train_score)). Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max rel score delta train valid", + "output": "max rel score delta train valid config.toml: Max. relative delta between training and validation scores for tree models.: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this relative value (i.e., stop adding trees once abs(train_score - valid_score) > max_rel_score_delta_train_valid * abs(train_score)). Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. relative delta between training and validation scores for tree models.: ", + "output": "max rel score delta train valid config.toml: Max. 
relative delta between training and validation scores for tree models.: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this relative value (i.e., stop adding trees once abs(train_score - valid_score) > max_rel_score_delta_train_valid * abs(train_score)). Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_rel_score_delta_train_valid", + "output": "max rel score delta train valid config.toml: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this relative value (i.e., stop adding trees once abs(train_score - valid_score) > max_rel_score_delta_train_valid * abs(train_score)). Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_rel_score_delta_train_valid", + "output": "max rel score delta train valid config.toml: Max. relative delta between training and validation scores for tree models.: Modify early stopping behavior for tree-based models (LightGBM, XGBoostGBM, CatBoost) such that training score (on training data, not holdout) and validation score differ no more than this relative value (i.e., stop adding trees once abs(train_score - valid_score) > max_rel_score_delta_train_valid * abs(train_score)). 
Keep in mind that the meaning of this value depends on the chosen scorer and the dataset (i.e., 0.01 for LogLoss is different than 0.01 for MSE). Experimental option, only for expert use to keep model complexity low. To disable, set to 0.0" + }, + { + "prompt_type": "plain", + "instruction": ": What does glm_lambda_search do? : glm lambda search config.toml: Whether to search for optimal lambda for given alpha for XGBoost GLM. If 'auto', disabled if training data has more rows * cols than final_pipeline_data_size or for multiclass experiments. Disabled always for ensemble_level = 0. Not always a good approach, can be slow for little payoff compared to grid search. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain glm_lambda_search. : glm lambda search config.toml: Whether to search for optimal lambda for given alpha for XGBoost GLM. If 'auto', disabled if training data has more rows * cols than final_pipeline_data_size or for multiclass experiments. Disabled always for ensemble_level = 0. Not always a good approach, can be slow for little payoff compared to grid search. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Do lambda search for GLM: . : Set the glm lambda search config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm_lambda_search", + "output": "glm lambda search config.toml: Whether to search for optimal lambda for given alpha for XGBoost GLM. If 'auto', disabled if training data has more rows * cols than final_pipeline_data_size or for multiclass experiments. Disabled always for ensemble_level = 0. Not always a good approach, can be slow for little payoff compared to grid search. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm_lambda_search", + "output": "glm lambda search config.toml: Do lambda search for GLM: Whether to search for optimal lambda for given alpha for XGBoost GLM. If 'auto', disabled if training data has more rows * cols than final_pipeline_data_size or for multiclass experiments. Disabled always for ensemble_level = 0. Not always a good approach, can be slow for little payoff compared to grid search. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm lambda search", + "output": "glm lambda search config.toml: Do lambda search for GLM: Whether to search for optimal lambda for given alpha for XGBoost GLM. If 'auto', disabled if training data has more rows * cols than final_pipeline_data_size or for multiclass experiments. Disabled always for ensemble_level = 0. Not always a good approach, can be slow for little payoff compared to grid search. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Do lambda search for GLM: ", + "output": "glm lambda search config.toml: Do lambda search for GLM: Whether to search for optimal lambda for given alpha for XGBoost GLM. If 'auto', disabled if training data has more rows * cols than final_pipeline_data_size or for multiclass experiments. Disabled always for ensemble_level = 0. Not always a good approach, can be slow for little payoff compared to grid search. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting glm_lambda_search", + "output": "glm lambda search config.toml: Whether to search for optimal lambda for given alpha for XGBoost GLM. If 'auto', disabled if training data has more rows * cols than final_pipeline_data_size or for multiclass experiments. 
Disabled always for ensemble_level = 0. Not always a good approach, can be slow for little payoff compared to grid search. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting glm_lambda_search", + "output": "glm lambda search config.toml: Do lambda search for GLM: Whether to search for optimal lambda for given alpha for XGBoost GLM. If 'auto', disabled if training data has more rows * cols than final_pipeline_data_size or for multiclass experiments. Disabled always for ensemble_level = 0. Not always a good approach, can be slow for little payoff compared to grid search. " + }, + { + "prompt_type": "plain", + "instruction": ": What does glm_lambda_search_by_eval_metric do? : glm lambda search by eval metric config.toml: If XGBoost GLM lambda search is enabled, whether to do search by the eval metric (True) or using the actual DAI scorer (False)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain glm_lambda_search_by_eval_metric. : glm lambda search by eval metric config.toml: If XGBoost GLM lambda search is enabled, whether to do search by the eval metric (True) or using the actual DAI scorer (False)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Do lambda search for GLM by exact eval metric: . : Set the glm lambda search by eval metric config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm_lambda_search_by_eval_metric", + "output": "glm lambda search by eval metric config.toml: If XGBoost GLM lambda search is enabled, whether to do search by the eval metric (True) or using the actual DAI scorer (False)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm_lambda_search_by_eval_metric", + "output": "glm lambda search by eval metric config.toml: Do lambda search for GLM by exact eval metric: If XGBoost GLM lambda search is enabled, whether to do search by the eval metric (True) or using the actual DAI scorer (False)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "glm lambda search by eval metric", + "output": "glm lambda search by eval metric config.toml: Do lambda search for GLM by exact eval metric: If XGBoost GLM lambda search is enabled, whether to do search by the eval metric (True) or using the actual DAI scorer (False)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Do lambda search for GLM by exact eval metric: ", + "output": "glm lambda search by eval metric config.toml: Do lambda search for GLM by exact eval metric: If XGBoost GLM lambda search is enabled, whether to do search by the eval metric (True) or using the actual DAI scorer (False)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting glm_lambda_search_by_eval_metric", + "output": "glm lambda search by eval metric config.toml: If XGBoost GLM lambda search is enabled, whether to do search by the eval metric (True) or using the actual DAI scorer (False)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting glm_lambda_search_by_eval_metric", + "output": "glm lambda search by eval metric config.toml: Do lambda search for GLM by exact eval metric: If XGBoost GLM lambda search is enabled, whether to do search by the eval metric (True) or using the actual DAI scorer (False)." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does enable_early_stopping_threshold do? : enable early stopping threshold config.toml: Whether to enable early stopping threshold for LightGBM, varying by accuracy. Stops training once validation score changes by less than the threshold. This leads to fewer trees, usually avoiding wasteful trees, but may lower accuracy. However, it may also improve generalization by avoiding fine-tuning to validation set. 0 leads to value of 0 used, i.e. disabled > 0 means non-automatic mode using that *relative* value, scaled by first tree results of the metric for any metric. -1 means always enable, but the threshold itself is automatic (lower the accuracy, the larger the threshold). -2 means fully automatic mode, i.e. disabled unless reduce_mojo_size is true. In true, the lower the accuracy, the larger the threshold. NOTE: Automatic threshold is set so relative value of metric's min_delta in LightGBM's callback for early stopping is: if accuracy <= 1: early_stopping_threshold = 1e-1 elif accuracy <= 4: early_stopping_threshold = 1e-2 elif accuracy <= 7: early_stopping_threshold = 1e-3 elif accuracy <= 9: early_stopping_threshold = 1e-4 else: early_stopping_threshold = 0 " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_early_stopping_threshold. : enable early stopping threshold config.toml: Whether to enable early stopping threshold for LightGBM, varying by accuracy. Stops training once validation score changes by less than the threshold. This leads to fewer trees, usually avoiding wasteful trees, but may lower accuracy. However, it may also improve generalization by avoiding fine-tuning to validation set. 0 leads to value of 0 used, i.e. disabled > 0 means non-automatic mode using that *relative* value, scaled by first tree results of the metric for any metric. -1 means always enable, but the threshold itself is automatic (lower the accuracy, the larger the threshold). 
-2 means fully automatic mode, i.e. disabled unless reduce_mojo_size is true. If true, the lower the accuracy, the larger the threshold. NOTE: Automatic threshold is set so relative value of metric's min_delta in LightGBM's callback for early stopping is: if accuracy <= 1: early_stopping_threshold = 1e-1 elif accuracy <= 4: early_stopping_threshold = 1e-2 elif accuracy <= 7: early_stopping_threshold = 1e-3 elif accuracy <= 9: early_stopping_threshold = 1e-4 else: early_stopping_threshold = 0 " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Early stopping threshold: . : Set the enable early stopping threshold config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_early_stopping_threshold", + "output": "enable early stopping threshold config.toml: Whether to enable early stopping threshold for LightGBM, varying by accuracy. Stops training once validation score changes by less than the threshold. This leads to fewer trees, usually avoiding wasteful trees, but may lower accuracy. However, it may also improve generalization by avoiding fine-tuning to validation set. 0 leads to value of 0 used, i.e. disabled > 0 means non-automatic mode using that *relative* value, scaled by first tree results of the metric for any metric. -1 means always enable, but the threshold itself is automatic (lower the accuracy, the larger the threshold). -2 means fully automatic mode, i.e. disabled unless reduce_mojo_size is true. If true, the lower the accuracy, the larger the threshold. 
NOTE: Automatic threshold is set so relative value of metric's min_delta in LightGBM's callback for early stopping is: if accuracy <= 1: early_stopping_threshold = 1e-1 elif accuracy <= 4: early_stopping_threshold = 1e-2 elif accuracy <= 7: early_stopping_threshold = 1e-3 elif accuracy <= 9: early_stopping_threshold = 1e-4 else: early_stopping_threshold = 0 " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_early_stopping_threshold", + "output": "enable early stopping threshold config.toml: Early stopping threshold: Whether to enable early stopping threshold for LightGBM, varying by accuracy. Stops training once validation score changes by less than the threshold. This leads to fewer trees, usually avoiding wasteful trees, but may lower accuracy. However, it may also improve generalization by avoiding fine-tuning to validation set. 0 leads to value of 0 used, i.e. disabled > 0 means non-automatic mode using that *relative* value, scaled by first tree results of the metric for any metric. -1 means always enable, but the threshold itself is automatic (lower the accuracy, the larger the threshold). -2 means fully automatic mode, i.e. disabled unless reduce_mojo_size is true. In true, the lower the accuracy, the larger the threshold. 
NOTE: Automatic threshold is set so relative value of metric's min_delta in LightGBM's callback for early stopping is: if accuracy <= 1: early_stopping_threshold = 1e-1 elif accuracy <= 4: early_stopping_threshold = 1e-2 elif accuracy <= 7: early_stopping_threshold = 1e-3 elif accuracy <= 9: early_stopping_threshold = 1e-4 else: early_stopping_threshold = 0 " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable early stopping threshold", + "output": "enable early stopping threshold config.toml: Early stopping threshold: Whether to enable early stopping threshold for LightGBM, varying by accuracy. Stops training once validation score changes by less than the threshold. This leads to fewer trees, usually avoiding wasteful trees, but may lower accuracy. However, it may also improve generalization by avoiding fine-tuning to validation set. 0 leads to value of 0 used, i.e. disabled > 0 means non-automatic mode using that *relative* value, scaled by first tree results of the metric for any metric. -1 means always enable, but the threshold itself is automatic (lower the accuracy, the larger the threshold). -2 means fully automatic mode, i.e. disabled unless reduce_mojo_size is true. In true, the lower the accuracy, the larger the threshold. 
NOTE: Automatic threshold is set so relative value of metric's min_delta in LightGBM's callback for early stopping is: if accuracy <= 1: early_stopping_threshold = 1e-1 elif accuracy <= 4: early_stopping_threshold = 1e-2 elif accuracy <= 7: early_stopping_threshold = 1e-3 elif accuracy <= 9: early_stopping_threshold = 1e-4 else: early_stopping_threshold = 0 " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Early stopping threshold: ", + "output": "enable early stopping threshold config.toml: Early stopping threshold: Whether to enable early stopping threshold for LightGBM, varying by accuracy. Stops training once validation score changes by less than the threshold. This leads to fewer trees, usually avoiding wasteful trees, but may lower accuracy. However, it may also improve generalization by avoiding fine-tuning to validation set. 0 leads to value of 0 used, i.e. disabled > 0 means non-automatic mode using that *relative* value, scaled by first tree results of the metric for any metric. -1 means always enable, but the threshold itself is automatic (lower the accuracy, the larger the threshold). -2 means fully automatic mode, i.e. disabled unless reduce_mojo_size is true. In true, the lower the accuracy, the larger the threshold. NOTE: Automatic threshold is set so relative value of metric's min_delta in LightGBM's callback for early stopping is: if accuracy <= 1: early_stopping_threshold = 1e-1 elif accuracy <= 4: early_stopping_threshold = 1e-2 elif accuracy <= 7: early_stopping_threshold = 1e-3 elif accuracy <= 9: early_stopping_threshold = 1e-4 else: early_stopping_threshold = 0 " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_early_stopping_threshold", + "output": "enable early stopping threshold config.toml: Whether to enable early stopping threshold for LightGBM, varying by accuracy. 
Stops training once validation score changes by less than the threshold. This leads to fewer trees, usually avoiding wasteful trees, but may lower accuracy. However, it may also improve generalization by avoiding fine-tuning to validation set. 0 leads to value of 0 used, i.e. disabled > 0 means non-automatic mode using that *relative* value, scaled by first tree results of the metric for any metric. -1 means always enable, but the threshold itself is automatic (lower the accuracy, the larger the threshold). -2 means fully automatic mode, i.e. disabled unless reduce_mojo_size is true. If true, the lower the accuracy, the larger the threshold. NOTE: Automatic threshold is set so relative value of metric's min_delta in LightGBM's callback for early stopping is: if accuracy <= 1: early_stopping_threshold = 1e-1 elif accuracy <= 4: early_stopping_threshold = 1e-2 elif accuracy <= 7: early_stopping_threshold = 1e-3 elif accuracy <= 9: early_stopping_threshold = 1e-4 else: early_stopping_threshold = 0 " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_early_stopping_threshold", + "output": "enable early stopping threshold config.toml: Early stopping threshold: Whether to enable early stopping threshold for LightGBM, varying by accuracy. Stops training once validation score changes by less than the threshold. This leads to fewer trees, usually avoiding wasteful trees, but may lower accuracy. However, it may also improve generalization by avoiding fine-tuning to validation set. 0 leads to value of 0 used, i.e. disabled > 0 means non-automatic mode using that *relative* value, scaled by first tree results of the metric for any metric. -1 means always enable, but the threshold itself is automatic (lower the accuracy, the larger the threshold). -2 means fully automatic mode, i.e. disabled unless reduce_mojo_size is true. If true, the lower the accuracy, the larger the threshold. 
NOTE: Automatic threshold is set so relative value of metric's min_delta in LightGBM's callback for early stopping is: if accuracy <= 1: early_stopping_threshold = 1e-1 elif accuracy <= 4: early_stopping_threshold = 1e-2 elif accuracy <= 7: early_stopping_threshold = 1e-3 elif accuracy <= 9: early_stopping_threshold = 1e-4 else: early_stopping_threshold = 0 " + }, + { + "prompt_type": "plain", + "instruction": ": What does max_varimp_to_save do? : max varimp to save config.toml: Max. number of top variable importances to save per iteration (GUI can only display a max. of 14)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_varimp_to_save. : max varimp to save config.toml: Max. number of top variable importances to save per iteration (GUI can only display a max. of 14)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_varimp_to_save", + "output": "max varimp to save config.toml: Max. number of top variable importances to save per iteration (GUI can only display a max. of 14)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_varimp_to_save", + "output": "max varimp to save config.toml: Max. number of top variable importances to save per iteration (GUI can only display a max. of 14)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max varimp to save", + "output": "max varimp to save config.toml: Max. number of top variable importances to save per iteration (GUI can only display a max. of 14)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max varimp to save config.toml: Max. number of top variable importances to save per iteration (GUI can only display a max. 
of 14)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_varimp_to_save", + "output": "max varimp to save config.toml: Max. number of top variable importances to save per iteration (GUI can only display a max. of 14)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_varimp_to_save", + "output": "max varimp to save config.toml: Max. number of top variable importances to save per iteration (GUI can only display a max. of 14)" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_num_varimp_to_log do? : max num varimp to log config.toml: Max. number of top variable importances to show in logs during feature evolution" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_num_varimp_to_log. : max num varimp to log config.toml: Max. number of top variable importances to show in logs during feature evolution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_num_varimp_to_log", + "output": "max num varimp to log config.toml: Max. number of top variable importances to show in logs during feature evolution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_num_varimp_to_log", + "output": "max num varimp to log config.toml: Max. number of top variable importances to show in logs during feature evolution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max num varimp to log", + "output": "max num varimp to log config.toml: Max. number of top variable importances to show in logs during feature evolution" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max num varimp to log config.toml: Max. 
number of top variable importances to show in logs during feature evolution" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_num_varimp_to_log", + "output": "max num varimp to log config.toml: Max. number of top variable importances to show in logs during feature evolution" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_num_varimp_to_log", + "output": "max num varimp to log config.toml: Max. number of top variable importances to show in logs during feature evolution" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_num_varimp_shift_to_log do? : max num varimp shift to log config.toml: Max. number of top variable importance shifts to show in logs and GUI after final model built" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_num_varimp_shift_to_log. : max num varimp shift to log config.toml: Max. number of top variable importance shifts to show in logs and GUI after final model built" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_num_varimp_shift_to_log", + "output": "max num varimp shift to log config.toml: Max. number of top variable importance shifts to show in logs and GUI after final model built" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_num_varimp_shift_to_log", + "output": "max num varimp shift to log config.toml: Max. number of top variable importance shifts to show in logs and GUI after final model built" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max num varimp shift to log", + "output": "max num varimp shift to log config.toml: Max. 
number of top variable importance shifts to show in logs and GUI after final model built" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max num varimp shift to log config.toml: Max. number of top variable importance shifts to show in logs and GUI after final model built" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_num_varimp_shift_to_log", + "output": "max num varimp shift to log config.toml: Max. number of top variable importance shifts to show in logs and GUI after final model built" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_num_varimp_shift_to_log", + "output": "max num varimp shift to log config.toml: Max. number of top variable importance shifts to show in logs and GUI after final model built" + }, + { + "prompt_type": "plain", + "instruction": ": What does skip_transformer_failures do? : skip transformer failures config.toml: Skipping just avoids the failed transformer.Sometimes python multiprocessing swallows exceptions,so skipping and logging exceptions is also more reliable way to handle them.Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error.Features that fail are pruned from the individual.If that leaves no features in the individual, then backend tuning, feature/model tuning, final model building, etc.will still fail since DAI should not continue if all features are from a failed state. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain skip_transformer_failures. 
: skip transformer failures config.toml: Skipping just avoids the failed transformer. Sometimes python multiprocessing swallows exceptions, so skipping and logging exceptions is also a more reliable way to handle them. Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error. Features that fail are pruned from the individual. If that leaves no features in the individual, then backend tuning, feature/model tuning, final model building, etc. will still fail since DAI should not continue if all features are from a failed state. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to skip failures of transformers: . : Set the skip transformer failures config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip_transformer_failures", + "output": "skip transformer failures config.toml: Skipping just avoids the failed transformer. Sometimes python multiprocessing swallows exceptions, so skipping and logging exceptions is also a more reliable way to handle them. Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error. Features that fail are pruned from the individual. If that leaves no features in the individual, then backend tuning, feature/model tuning, final model building, etc. will still fail since DAI should not continue if all features are from a failed state. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip_transformer_failures", + "output": "skip transformer failures config.toml: Whether to skip failures of transformers: Skipping just avoids the failed transformer.Sometimes python multiprocessing swallows exceptions,so skipping and logging exceptions is also more reliable way to handle them.Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error.Features that fail are pruned from the individual.If that leaves no features in the individual, then backend tuning, feature/model tuning, final model building, etc.will still fail since DAI should not continue if all features are from a failed state. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip transformer failures", + "output": "skip transformer failures config.toml: Whether to skip failures of transformers: Skipping just avoids the failed transformer.Sometimes python multiprocessing swallows exceptions,so skipping and logging exceptions is also more reliable way to handle them.Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error.Features that fail are pruned from the individual.If that leaves no features in the individual, then backend tuning, feature/model tuning, final model building, etc.will still fail since DAI should not continue if all features are from a failed state. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to skip failures of transformers: ", + "output": "skip transformer failures config.toml: Whether to skip failures of transformers: Skipping just avoids the failed transformer.Sometimes python multiprocessing swallows exceptions,so skipping and logging exceptions is also more reliable way to handle them.Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error.Features that fail are pruned from the individual.If that leaves no features in the individual, then backend tuning, feature/model tuning, final model building, etc.will still fail since DAI should not continue if all features are from a failed state. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting skip_transformer_failures", + "output": "skip transformer failures config.toml: Skipping just avoids the failed transformer.Sometimes python multiprocessing swallows exceptions,so skipping and logging exceptions is also more reliable way to handle them.Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error.Features that fail are pruned from the individual.If that leaves no features in the individual, then backend tuning, feature/model tuning, final model building, etc.will still fail since DAI should not continue if all features are from a failed state. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting skip_transformer_failures", + "output": "skip transformer failures config.toml: Whether to skip failures of transformers: Skipping just avoids the failed transformer.Sometimes python multiprocessing swallows exceptions,so skipping and logging exceptions is also more reliable way to handle them.Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error.Features that fail are pruned from the individual.If that leaves no features in the individual, then backend tuning, feature/model tuning, final model building, etc.will still fail since DAI should not continue if all features are from a failed state. " + }, + { + "prompt_type": "plain", + "instruction": ": What does skip_model_failures do? : skip model failures config.toml: Skipping just avoids the failed model. Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain skip_model_failures. : skip model failures config.toml: Skipping just avoids the failed model. Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to skip failures of models: . : Set the skip model failures config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip_model_failures", + "output": "skip model failures config.toml: Skipping just avoids the failed model. Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip_model_failures", + "output": "skip model failures config.toml: Whether to skip failures of models: Skipping just avoids the failed model. Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip model failures", + "output": "skip model failures config.toml: Whether to skip failures of models: Skipping just avoids the failed model. Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to skip failures of models: ", + "output": "skip model failures config.toml: Whether to skip failures of models: Skipping just avoids the failed model. Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting skip_model_failures", + "output": "skip model failures config.toml: Skipping just avoids the failed model. Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting skip_model_failures", + "output": "skip model failures config.toml: Whether to skip failures of models: Skipping just avoids the failed model. 
Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error. " + }, + { + "prompt_type": "plain", + "instruction": ": What does skip_scorer_failures do? : skip scorer failures config.toml: Skipping just avoids the failed scorer if among many scorers. Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error.Default is True to avoid failing in, e.g., final model building due to a single scorer. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain skip_scorer_failures. : skip scorer failures config.toml: Skipping just avoids the failed scorer if among many scorers. Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error.Default is True to avoid failing in, e.g., final model building due to a single scorer. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to skip failures of scorers: . : Set the skip scorer failures config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip_scorer_failures", + "output": "skip scorer failures config.toml: Skipping just avoids the failed scorer if among many scorers. Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error.Default is True to avoid failing in, e.g., final model building due to a single scorer. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip_scorer_failures", + "output": "skip scorer failures config.toml: Whether to skip failures of scorers: Skipping just avoids the failed scorer if among many scorers. Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error.Default is True to avoid failing in, e.g., final model building due to a single scorer. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip scorer failures", + "output": "skip scorer failures config.toml: Whether to skip failures of scorers: Skipping just avoids the failed scorer if among many scorers. Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error.Default is True to avoid failing in, e.g., final model building due to a single scorer. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to skip failures of scorers: ", + "output": "skip scorer failures config.toml: Whether to skip failures of scorers: Skipping just avoids the failed scorer if among many scorers. Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error.Default is True to avoid failing in, e.g., final model building due to a single scorer. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting skip_scorer_failures", + "output": "skip scorer failures config.toml: Skipping just avoids the failed scorer if among many scorers. 
Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error.Default is True to avoid failing in, e.g., final model building due to a single scorer. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting skip_scorer_failures", + "output": "skip scorer failures config.toml: Whether to skip failures of scorers: Skipping just avoids the failed scorer if among many scorers. Failures are logged depending upon detailed_skip_failure_messages_level.\"Recipe can raise h2oaicore.systemutils.IgnoreError to ignore error and avoid logging error.Default is True to avoid failing in, e.g., final model building due to a single scorer. " + }, + { + "prompt_type": "plain", + "instruction": ": What does skip_data_recipe_failures do? : skip data recipe failures config.toml: Skipping avoids the failed recipe. Failures are logged depending upon detailed_skip_failure_messages_level.\"Default is False because runtime data recipes are one-time at start of experiment and expected to work by default. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain skip_data_recipe_failures. : skip data recipe failures config.toml: Skipping avoids the failed recipe. Failures are logged depending upon detailed_skip_failure_messages_level.\"Default is False because runtime data recipes are one-time at start of experiment and expected to work by default. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to skip runtime data recipe failures: . : Set the skip data recipe failures config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip_data_recipe_failures", + "output": "skip data recipe failures config.toml: Skipping avoids the failed recipe. 
Failures are logged depending upon detailed_skip_failure_messages_level.\"Default is False because runtime data recipes are one-time at start of experiment and expected to work by default. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip_data_recipe_failures", + "output": "skip data recipe failures config.toml: Whether to skip runtime data recipe failures: Skipping avoids the failed recipe. Failures are logged depending upon detailed_skip_failure_messages_level.\"Default is False because runtime data recipes are one-time at start of experiment and expected to work by default. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip data recipe failures", + "output": "skip data recipe failures config.toml: Whether to skip runtime data recipe failures: Skipping avoids the failed recipe. Failures are logged depending upon detailed_skip_failure_messages_level.\"Default is False because runtime data recipes are one-time at start of experiment and expected to work by default. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to skip runtime data recipe failures: ", + "output": "skip data recipe failures config.toml: Whether to skip runtime data recipe failures: Skipping avoids the failed recipe. Failures are logged depending upon detailed_skip_failure_messages_level.\"Default is False because runtime data recipes are one-time at start of experiment and expected to work by default. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting skip_data_recipe_failures", + "output": "skip data recipe failures config.toml: Skipping avoids the failed recipe. 
Failures are logged depending upon detailed_skip_failure_messages_level.\"Default is False because runtime data recipes are one-time at start of experiment and expected to work by default. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting skip_data_recipe_failures", + "output": "skip data recipe failures config.toml: Whether to skip runtime data recipe failures: Skipping avoids the failed recipe. Failures are logged depending upon detailed_skip_failure_messages_level.\"Default is False because runtime data recipes are one-time at start of experiment and expected to work by default. " + }, + { + "prompt_type": "plain", + "instruction": ": What does can_skip_final_upper_layer_failures do? : can skip final upper layer failures config.toml: Whether can skip final model transformer failures for layer > first layer for multi-layer pipeline." + }, + { + "prompt_type": "plain", + "instruction": ": Explain can_skip_final_upper_layer_failures. : can skip final upper layer failures config.toml: Whether can skip final model transformer failures for layer > first layer for multi-layer pipeline." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "can_skip_final_upper_layer_failures", + "output": "can skip final upper layer failures config.toml: Whether can skip final model transformer failures for layer > first layer for multi-layer pipeline." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "can_skip_final_upper_layer_failures", + "output": "can skip final upper layer failures config.toml: Whether can skip final model transformer failures for layer > first layer for multi-layer pipeline." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "can skip final upper layer failures", + "output": "can skip final upper layer failures config.toml: Whether can skip final model transformer failures for layer > first layer for multi-layer pipeline." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "can skip final upper layer failures config.toml: Whether can skip final model transformer failures for layer > first layer for multi-layer pipeline." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting can_skip_final_upper_layer_failures", + "output": "can skip final upper layer failures config.toml: Whether can skip final model transformer failures for layer > first layer for multi-layer pipeline." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting can_skip_final_upper_layer_failures", + "output": "can skip final upper layer failures config.toml: Whether can skip final model transformer failures for layer > first layer for multi-layer pipeline." + }, + { + "prompt_type": "plain", + "instruction": ": What does detailed_skip_failure_messages_level do? : detailed skip failure messages level config.toml: How much verbosity to log failure messages for failed and then skipped transformers or models. Full failures always go to disk as *.stack files, which upon completion of experiment goes into details folder within experiment log zip file. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain detailed_skip_failure_messages_level. : detailed skip failure messages level config.toml: How much verbosity to log failure messages for failed and then skipped transformers or models. 
Full failures always go to disk as *.stack files, which upon completion of experiment goes into details folder within experiment log zip file. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Level to log (0=simple message 1=code line plus message 2=detailed stack traces) for skipped failures.: . : Set the detailed skip failure messages level config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detailed_skip_failure_messages_level", + "output": "detailed skip failure messages level config.toml: How much verbosity to log failure messages for failed and then skipped transformers or models. Full failures always go to disk as *.stack files, which upon completion of experiment goes into details folder within experiment log zip file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detailed_skip_failure_messages_level", + "output": "detailed skip failure messages level config.toml: Level to log (0=simple message 1=code line plus message 2=detailed stack traces) for skipped failures.: How much verbosity to log failure messages for failed and then skipped transformers or models. Full failures always go to disk as *.stack files, which upon completion of experiment goes into details folder within experiment log zip file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "detailed skip failure messages level", + "output": "detailed skip failure messages level config.toml: Level to log (0=simple message 1=code line plus message 2=detailed stack traces) for skipped failures.: How much verbosity to log failure messages for failed and then skipped transformers or models. Full failures always go to disk as *.stack files, which upon completion of experiment goes into details folder within experiment log zip file. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Level to log (0=simple message 1=code line plus message 2=detailed stack traces) for skipped failures.: ", + "output": "detailed skip failure messages level config.toml: Level to log (0=simple message 1=code line plus message 2=detailed stack traces) for skipped failures.: How much verbosity to log failure messages for failed and then skipped transformers or models. Full failures always go to disk as *.stack files, which upon completion of experiment goes into details folder within experiment log zip file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting detailed_skip_failure_messages_level", + "output": "detailed skip failure messages level config.toml: How much verbosity to log failure messages for failed and then skipped transformers or models. Full failures always go to disk as *.stack files, which upon completion of experiment goes into details folder within experiment log zip file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting detailed_skip_failure_messages_level", + "output": "detailed skip failure messages level config.toml: Level to log (0=simple message 1=code line plus message 2=detailed stack traces) for skipped failures.: How much verbosity to log failure messages for failed and then skipped transformers or models. Full failures always go to disk as *.stack files, which upon completion of experiment goes into details folder within experiment log zip file. " + }, + { + "prompt_type": "plain", + "instruction": ": What does notify_failures do? : notify failures config.toml: Whether to not just log errors of recipes (models and transformers) but also show high-level notification in GUI. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain notify_failures. 
: notify failures config.toml: Whether to not just log errors of recipes (models and transformers) but also show high-level notification in GUI. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to notify about failures of transformers or models or other recipe failures: . : Set the notify failures config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "notify_failures", + "output": "notify failures config.toml: Whether to not just log errors of recipes (models and transformers) but also show high-level notification in GUI. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "notify_failures", + "output": "notify failures config.toml: Whether to notify about failures of transformers or models or other recipe failures: Whether to not just log errors of recipes (models and transformers) but also show high-level notification in GUI. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "notify failures", + "output": "notify failures config.toml: Whether to notify about failures of transformers or models or other recipe failures: Whether to not just log errors of recipes (models and transformers) but also show high-level notification in GUI. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to notify about failures of transformers or models or other recipe failures: ", + "output": "notify failures config.toml: Whether to notify about failures of transformers or models or other recipe failures: Whether to not just log errors of recipes (models and transformers) but also show high-level notification in GUI. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting notify_failures", + "output": "notify failures config.toml: Whether to not just log errors of recipes (models and transformers) but also show high-level notification in GUI. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting notify_failures", + "output": "notify failures config.toml: Whether to notify about failures of transformers or models or other recipe failures: Whether to not just log errors of recipes (models and transformers) but also show high-level notification in GUI. " + }, + { + "prompt_type": "plain", + "instruction": ": What does config_overrides do? : config overrides config.toml: Instructions for 'Add to config.toml via toml string' in GUI expert pageSelf-referential toml parameter, for setting any other toml parameters as string of tomls separated by (spaces around are ok).Useful when toml parameter is not in expert mode but want per-experiment control.Setting this will override all other choices.In expert page, each time expert options saved, the new state is set without memory of any prior settings.The entered item is a fully compliant toml string that would be processed directly by toml.load().One should include 2 double quotes around the entire setting, or double quotes need to be escaped.One enters into the expert page text as follows:e.g. ``enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"on\"``e.g. ``\"\"enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"off\" enable_tensorflow=\"on\"\"\"``e.g. ``fixed_num_individuals=4``e.g. ``params_lightgbm=\"{'objective':'poisson'}\"``e.g. ``\"\"params_lightgbm=\"{'objective':'poisson'}\"\"\"``e.g. 
``max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"``e.g. \"\"max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"\"\"If you see: \"toml.TomlDecodeError\" then ensure toml is set correctly.When set in the expert page of an experiment, these changes only affect experiments and not the serverUsually should keep this as empty string in this toml file. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain config_overrides. : config overrides config.toml: Instructions for 'Add to config.toml via toml string' in GUI expert pageSelf-referential toml parameter, for setting any other toml parameters as string of tomls separated by (spaces around are ok).Useful when toml parameter is not in expert mode but want per-experiment control.Setting this will override all other choices.In expert page, each time expert options saved, the new state is set without memory of any prior settings.The entered item is a fully compliant toml string that would be processed directly by toml.load().One should include 2 double quotes around the entire setting, or double quotes need to be escaped.One enters into the expert page text as follows:e.g. ``enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"on\"``e.g. ``\"\"enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"off\" enable_tensorflow=\"on\"\"\"``e.g. ``fixed_num_individuals=4``e.g. ``params_lightgbm=\"{'objective':'poisson'}\"``e.g. ``\"\"params_lightgbm=\"{'objective':'poisson'}\"\"\"``e.g. 
``max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"``e.g. \"\"max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"\"\"If you see: \"toml.TomlDecodeError\" then ensure toml is set correctly.When set in the expert page of an experiment, these changes only affect experiments and not the serverUsually should keep this as empty string in this toml file. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Add to config.toml via toml string: . : Set the config overrides config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "config_overrides", + "output": "config overrides config.toml: Instructions for 'Add to config.toml via toml string' in GUI expert pageSelf-referential toml parameter, for setting any other toml parameters as string of tomls separated by (spaces around are ok).Useful when toml parameter is not in expert mode but want per-experiment control.Setting this will override all other choices.In expert page, each time expert options saved, the new state is set without memory of any prior settings.The entered item is a fully compliant toml string that would be processed directly by toml.load().One should include 2 double quotes around the entire setting, or double quotes need to be escaped.One enters into the expert page text as follows:e.g. ``enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"on\"``e.g. 
``\"\"enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"off\" enable_tensorflow=\"on\"\"\"``e.g. ``fixed_num_individuals=4``e.g. ``params_lightgbm=\"{'objective':'poisson'}\"``e.g. ``\"\"params_lightgbm=\"{'objective':'poisson'}\"\"\"``e.g. ``max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"``e.g. \"\"max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"\"\"If you see: \"toml.TomlDecodeError\" then ensure toml is set correctly.When set in the expert page of an experiment, these changes only affect experiments and not the serverUsually should keep this as empty string in this toml file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "config_overrides", + "output": "config overrides config.toml: Add to config.toml via toml string: Instructions for 'Add to config.toml via toml string' in GUI expert pageSelf-referential toml parameter, for setting any other toml parameters as string of tomls separated by (spaces around are ok).Useful when toml parameter is not in expert mode but want per-experiment control.Setting this will override all other choices.In expert page, each time expert options saved, the new state is set without memory of any prior settings.The entered item is a fully compliant toml string that would be processed directly by toml.load().One should include 2 double quotes around the entire setting, or double quotes need to be escaped.One enters into the expert page text as follows:e.g. 
``enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"on\"``e.g. ``\"\"enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"off\" enable_tensorflow=\"on\"\"\"``e.g. ``fixed_num_individuals=4``e.g. ``params_lightgbm=\"{'objective':'poisson'}\"``e.g. ``\"\"params_lightgbm=\"{'objective':'poisson'}\"\"\"``e.g. ``max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"``e.g. \"\"max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"\"\"If you see: \"toml.TomlDecodeError\" then ensure toml is set correctly.When set in the expert page of an experiment, these changes only affect experiments and not the serverUsually should keep this as empty string in this toml file. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "config overrides", + "output": "config overrides config.toml: Add to config.toml via toml string: Instructions for 'Add to config.toml via toml string' in GUI expert pageSelf-referential toml parameter, for setting any other toml parameters as string of tomls separated by (spaces around are ok).Useful when toml parameter is not in expert mode but want per-experiment control.Setting this will override all other choices.In expert page, each time expert options saved, the new state is set without memory of any prior settings.The entered item is a fully compliant toml string that would be processed directly by toml.load().One should include 2 double quotes around the entire setting, or double quotes need to be escaped.One enters into the expert page text as follows:e.g. ``enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"on\"``e.g. ``\"\"enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"off\" enable_tensorflow=\"on\"\"\"``e.g. ``fixed_num_individuals=4``e.g. ``params_lightgbm=\"{'objective':'poisson'}\"``e.g. ``\"\"params_lightgbm=\"{'objective':'poisson'}\"\"\"``e.g. ``max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"``e.g. 
\"\"max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"\"\"If you see: \"toml.TomlDecodeError\" then ensure toml is set correctly.When set in the expert page of an experiment, these changes only affect experiments and not the serverUsually should keep this as empty string in this toml file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Add to config.toml via toml string: ", + "output": "config overrides config.toml: Add to config.toml via toml string: Instructions for 'Add to config.toml via toml string' in GUI expert pageSelf-referential toml parameter, for setting any other toml parameters as string of tomls separated by (spaces around are ok).Useful when toml parameter is not in expert mode but want per-experiment control.Setting this will override all other choices.In expert page, each time expert options saved, the new state is set without memory of any prior settings.The entered item is a fully compliant toml string that would be processed directly by toml.load().One should include 2 double quotes around the entire setting, or double quotes need to be escaped.One enters into the expert page text as follows:e.g. ``enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"on\"``e.g. ``\"\"enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"off\" enable_tensorflow=\"on\"\"\"``e.g. ``fixed_num_individuals=4``e.g. ``params_lightgbm=\"{'objective':'poisson'}\"``e.g. ``\"\"params_lightgbm=\"{'objective':'poisson'}\"\"\"``e.g. 
``max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"``e.g. \"\"max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"\"\"If you see: \"toml.TomlDecodeError\" then ensure toml is set correctly.When set in the expert page of an experiment, these changes only affect experiments and not the serverUsually should keep this as empty string in this toml file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting config_overrides", + "output": "config overrides config.toml: Instructions for 'Add to config.toml via toml string' in GUI expert pageSelf-referential toml parameter, for setting any other toml parameters as string of tomls separated by (spaces around are ok).Useful when toml parameter is not in expert mode but want per-experiment control.Setting this will override all other choices.In expert page, each time expert options saved, the new state is set without memory of any prior settings.The entered item is a fully compliant toml string that would be processed directly by toml.load().One should include 2 double quotes around the entire setting, or double quotes need to be escaped.One enters into the expert page text as follows:e.g. ``enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"on\"``e.g. ``\"\"enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"off\" enable_tensorflow=\"on\"\"\"``e.g. ``fixed_num_individuals=4``e.g. ``params_lightgbm=\"{'objective':'poisson'}\"``e.g. ``\"\"params_lightgbm=\"{'objective':'poisson'}\"\"\"``e.g. 
``max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"``e.g. \"\"max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"\"\"If you see: \"toml.TomlDecodeError\" then ensure toml is set correctly.When set in the expert page of an experiment, these changes only affect experiments and not the serverUsually should keep this as empty string in this toml file. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting config_overrides", + "output": "config overrides config.toml: Add to config.toml via toml string: Instructions for 'Add to config.toml via toml string' in GUI expert pageSelf-referential toml parameter, for setting any other toml parameters as string of tomls separated by (spaces around are ok).Useful when toml parameter is not in expert mode but want per-experiment control.Setting this will override all other choices.In expert page, each time expert options saved, the new state is set without memory of any prior settings.The entered item is a fully compliant toml string that would be processed directly by toml.load().One should include 2 double quotes around the entire setting, or double quotes need to be escaped.One enters into the expert page text as follows:e.g. ``enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"on\"``e.g. ``\"\"enable_glm=\"off\" enable_xgboost_gbm=\"off\" enable_lightgbm=\"off\" enable_tensorflow=\"on\"\"\"``e.g. ``fixed_num_individuals=4``e.g. ``params_lightgbm=\"{'objective':'poisson'}\"``e.g. 
``\"\"params_lightgbm=\"{'objective':'poisson'}\"\"\"``e.g. ``max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"``e.g. \"\"max_cores=10 data_precision=\"float32\" max_rows_feature_evolution=50000000000 ensemble_accuracy_switch=11 feature_engineering_effort=1 target_transformer=\"identity\" tournament_feature_style_accuracy_switch=5 params_tensorflow=\"{'layers': (100, 100, 100, 100, 100, 100)}\"\"\"If you see: \"toml.TomlDecodeError\" then ensure toml is set correctly.When set in the expert page of an experiment, these changes only affect experiments and not the serverUsually should keep this as empty string in this toml file. " + }, + { + "prompt_type": "plain", + "instruction": ": What does dump_varimp_every_scored_indiv do? : dump varimp every scored indiv config.toml: Whether to dump every scored individual's variable importance to csv/tabulated/json file produces files like:individual_scored_id%d.iter%d..features.txt for transformed features.individual_scored_id%d.iter%d..features_orig.txt for original features.individual_scored_id%d.iter%d..coefs.txt for absolute importance of transformed features.There are txt, tab.txt, and json formats for some files, and \"best_\" prefix means it is the best individual for that iterationThe hash in the name matches the hash in the files produced by dump_modelparams_every_scored_indiv=true that can be used to track mutation history." + }, + { + "prompt_type": "plain", + "instruction": ": Explain dump_varimp_every_scored_indiv. 
: dump varimp every scored indiv config.toml: Whether to dump every scored individual's variable importance to csv/tabulated/json file produces files like:individual_scored_id%d.iter%d..features.txt for transformed features.individual_scored_id%d.iter%d..features_orig.txt for original features.individual_scored_id%d.iter%d..coefs.txt for absolute importance of transformed features.There are txt, tab.txt, and json formats for some files, and \"best_\" prefix means it is the best individual for that iterationThe hash in the name matches the hash in the files produced by dump_modelparams_every_scored_indiv=true that can be used to track mutation history." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable detailed scored features info: . : Set the dump varimp every scored indiv config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump_varimp_every_scored_indiv", + "output": "dump varimp every scored indiv config.toml: Whether to dump every scored individual's variable importance to csv/tabulated/json file produces files like:individual_scored_id%d.iter%d..features.txt for transformed features.individual_scored_id%d.iter%d..features_orig.txt for original features.individual_scored_id%d.iter%d..coefs.txt for absolute importance of transformed features.There are txt, tab.txt, and json formats for some files, and \"best_\" prefix means it is the best individual for that iterationThe hash in the name matches the hash in the files produced by dump_modelparams_every_scored_indiv=true that can be used to track mutation history." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump_varimp_every_scored_indiv", + "output": "dump varimp every scored indiv config.toml: Enable detailed scored features info: Whether to dump every scored individual's variable importance to csv/tabulated/json file produces files like:individual_scored_id%d.iter%d..features.txt for transformed features.individual_scored_id%d.iter%d..features_orig.txt for original features.individual_scored_id%d.iter%d..coefs.txt for absolute importance of transformed features.There are txt, tab.txt, and json formats for some files, and \"best_\" prefix means it is the best individual for that iterationThe hash in the name matches the hash in the files produced by dump_modelparams_every_scored_indiv=true that can be used to track mutation history." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump varimp every scored indiv", + "output": "dump varimp every scored indiv config.toml: Enable detailed scored features info: Whether to dump every scored individual's variable importance to csv/tabulated/json file produces files like:individual_scored_id%d.iter%d..features.txt for transformed features.individual_scored_id%d.iter%d..features_orig.txt for original features.individual_scored_id%d.iter%d..coefs.txt for absolute importance of transformed features.There are txt, tab.txt, and json formats for some files, and \"best_\" prefix means it is the best individual for that iterationThe hash in the name matches the hash in the files produced by dump_modelparams_every_scored_indiv=true that can be used to track mutation history." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable detailed scored features info: ", + "output": "dump varimp every scored indiv config.toml: Enable detailed scored features info: Whether to dump every scored individual's variable importance to csv/tabulated/json file produces files like:individual_scored_id%d.iter%d..features.txt for transformed features.individual_scored_id%d.iter%d..features_orig.txt for original features.individual_scored_id%d.iter%d..coefs.txt for absolute importance of transformed features.There are txt, tab.txt, and json formats for some files, and \"best_\" prefix means it is the best individual for that iterationThe hash in the name matches the hash in the files produced by dump_modelparams_every_scored_indiv=true that can be used to track mutation history." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dump_varimp_every_scored_indiv", + "output": "dump varimp every scored indiv config.toml: Whether to dump every scored individual's variable importance to csv/tabulated/json file produces files like:individual_scored_id%d.iter%d..features.txt for transformed features.individual_scored_id%d.iter%d..features_orig.txt for original features.individual_scored_id%d.iter%d..coefs.txt for absolute importance of transformed features.There are txt, tab.txt, and json formats for some files, and \"best_\" prefix means it is the best individual for that iterationThe hash in the name matches the hash in the files produced by dump_modelparams_every_scored_indiv=true that can be used to track mutation history." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dump_varimp_every_scored_indiv", + "output": "dump varimp every scored indiv config.toml: Enable detailed scored features info: Whether to dump every scored individual's variable importance to csv/tabulated/json file produces files like:individual_scored_id%d.iter%d..features.txt for transformed features.individual_scored_id%d.iter%d..features_orig.txt for original features.individual_scored_id%d.iter%d..coefs.txt for absolute importance of transformed features.There are txt, tab.txt, and json formats for some files, and \"best_\" prefix means it is the best individual for that iterationThe hash in the name matches the hash in the files produced by dump_modelparams_every_scored_indiv=true that can be used to track mutation history." + }, + { + "prompt_type": "plain", + "instruction": ": What does dump_modelparams_every_scored_indiv do? : dump modelparams every scored indiv config.toml: Whether to dump every scored individual's model parameters to csv/tabulated/json fileproduces files like: individual_scored.params.[txt, csv, json].Each individual has a hash that matches the hash in the filenames produced if dump_varimp_every_scored_indiv=true,and the \"unchanging hash\" is the first parent hash (None if that individual is the first parent itself).These hashes can be used to track the history of the mutations. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dump_modelparams_every_scored_indiv. 
: dump modelparams every scored indiv config.toml: Whether to dump every scored individual's model parameters to csv/tabulated/json fileproduces files like: individual_scored.params.[txt, csv, json].Each individual has a hash that matches the hash in the filenames produced if dump_varimp_every_scored_indiv=true,and the \"unchanging hash\" is the first parent hash (None if that individual is the first parent itself).These hashes can be used to track the history of the mutations. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable detailed scored model info: . : Set the dump modelparams every scored indiv config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump_modelparams_every_scored_indiv", + "output": "dump modelparams every scored indiv config.toml: Whether to dump every scored individual's model parameters to csv/tabulated/json fileproduces files like: individual_scored.params.[txt, csv, json].Each individual has a hash that matches the hash in the filenames produced if dump_varimp_every_scored_indiv=true,and the \"unchanging hash\" is the first parent hash (None if that individual is the first parent itself).These hashes can be used to track the history of the mutations. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump_modelparams_every_scored_indiv", + "output": "dump modelparams every scored indiv config.toml: Enable detailed scored model info: Whether to dump every scored individual's model parameters to csv/tabulated/json fileproduces files like: individual_scored.params.[txt, csv, json].Each individual has a hash that matches the hash in the filenames produced if dump_varimp_every_scored_indiv=true,and the \"unchanging hash\" is the first parent hash (None if that individual is the first parent itself).These hashes can be used to track the history of the mutations. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump modelparams every scored indiv", + "output": "dump modelparams every scored indiv config.toml: Enable detailed scored model info: Whether to dump every scored individual's model parameters to csv/tabulated/json fileproduces files like: individual_scored.params.[txt, csv, json].Each individual has a hash that matches the hash in the filenames produced if dump_varimp_every_scored_indiv=true,and the \"unchanging hash\" is the first parent hash (None if that individual is the first parent itself).These hashes can be used to track the history of the mutations. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable detailed scored model info: ", + "output": "dump modelparams every scored indiv config.toml: Enable detailed scored model info: Whether to dump every scored individual's model parameters to csv/tabulated/json fileproduces files like: individual_scored.params.[txt, csv, json].Each individual has a hash that matches the hash in the filenames produced if dump_varimp_every_scored_indiv=true,and the \"unchanging hash\" is the first parent hash (None if that individual is the first parent itself).These hashes can be used to track the history of the mutations. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dump_modelparams_every_scored_indiv", + "output": "dump modelparams every scored indiv config.toml: Whether to dump every scored individual's model parameters to csv/tabulated/json fileproduces files like: individual_scored.params.[txt, csv, json].Each individual has a hash that matches the hash in the filenames produced if dump_varimp_every_scored_indiv=true,and the \"unchanging hash\" is the first parent hash (None if that individual is the first parent itself).These hashes can be used to track the history of the mutations. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dump_modelparams_every_scored_indiv", + "output": "dump modelparams every scored indiv config.toml: Enable detailed scored model info: Whether to dump every scored individual's model parameters to csv/tabulated/json fileproduces files like: individual_scored.params.[txt, csv, json].Each individual has a hash that matches the hash in the filenames produced if dump_varimp_every_scored_indiv=true,and the \"unchanging hash\" is the first parent hash (None if that individual is the first parent itself).These hashes can be used to track the history of the mutations. " + }, + { + "prompt_type": "plain", + "instruction": ": What does dump_modelparams_every_scored_indiv_feature_count do? : dump modelparams every scored indiv feature count config.toml: Number of features to show in model dump every scored individual" + }, + { + "prompt_type": "plain", + "instruction": ": Explain dump_modelparams_every_scored_indiv_feature_count. 
: dump modelparams every scored indiv feature count config.toml: Number of features to show in model dump every scored individual" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump_modelparams_every_scored_indiv_feature_count", + "output": "dump modelparams every scored indiv feature count config.toml: Number of features to show in model dump every scored individual" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump_modelparams_every_scored_indiv_feature_count", + "output": "dump modelparams every scored indiv feature count config.toml: Number of features to show in model dump every scored individual" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump modelparams every scored indiv feature count", + "output": "dump modelparams every scored indiv feature count config.toml: Number of features to show in model dump every scored individual" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "dump modelparams every scored indiv feature count config.toml: Number of features to show in model dump every scored individual" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dump_modelparams_every_scored_indiv_feature_count", + "output": "dump modelparams every scored indiv feature count config.toml: Number of features to show in model dump every scored individual" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dump_modelparams_every_scored_indiv_feature_count", + "output": "dump modelparams every scored indiv feature count config.toml: Number of features to show in model dump every scored individual" + }, + { + "prompt_type": 
"plain", + "instruction": ": What does dump_modelparams_every_scored_indiv_mutation_count do? : dump modelparams every scored indiv mutation count config.toml: Number of past mutations to show in model dump every scored individual" + }, + { + "prompt_type": "plain", + "instruction": ": Explain dump_modelparams_every_scored_indiv_mutation_count. : dump modelparams every scored indiv mutation count config.toml: Number of past mutations to show in model dump every scored individual" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump_modelparams_every_scored_indiv_mutation_count", + "output": "dump modelparams every scored indiv mutation count config.toml: Number of past mutations to show in model dump every scored individual" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump_modelparams_every_scored_indiv_mutation_count", + "output": "dump modelparams every scored indiv mutation count config.toml: Number of past mutations to show in model dump every scored individual" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump modelparams every scored indiv mutation count", + "output": "dump modelparams every scored indiv mutation count config.toml: Number of past mutations to show in model dump every scored individual" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "dump modelparams every scored indiv mutation count config.toml: Number of past mutations to show in model dump every scored individual" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dump_modelparams_every_scored_indiv_mutation_count", + "output": "dump modelparams every scored indiv mutation count config.toml: Number of past 
mutations to show in model dump every scored individual" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dump_modelparams_every_scored_indiv_mutation_count", + "output": "dump modelparams every scored indiv mutation count config.toml: Number of past mutations to show in model dump every scored individual" + }, + { + "prompt_type": "plain", + "instruction": ": What does dump_modelparams_separate_files do? : dump modelparams separate files config.toml: Whether to append (false) or have separate files, files like: individual_scored_id%d.iter%d*params*, (true) for modelparams every scored indiv" + }, + { + "prompt_type": "plain", + "instruction": ": Explain dump_modelparams_separate_files. : dump modelparams separate files config.toml: Whether to append (false) or have separate files, files like: individual_scored_id%d.iter%d*params*, (true) for modelparams every scored indiv" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump_modelparams_separate_files", + "output": "dump modelparams separate files config.toml: Whether to append (false) or have separate files, files like: individual_scored_id%d.iter%d*params*, (true) for modelparams every scored indiv" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump_modelparams_separate_files", + "output": "dump modelparams separate files config.toml: Whether to append (false) or have separate files, files like: individual_scored_id%d.iter%d*params*, (true) for modelparams every scored indiv" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump modelparams separate files", + "output": "dump modelparams separate files config.toml: Whether to append (false) or have separate files, files like: individual_scored_id%d.iter%d*params*, (true) 
for modelparams every scored indiv" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "dump modelparams separate files config.toml: Whether to append (false) or have separate files, files like: individual_scored_id%d.iter%d*params*, (true) for modelparams every scored indiv" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dump_modelparams_separate_files", + "output": "dump modelparams separate files config.toml: Whether to append (false) or have separate files, files like: individual_scored_id%d.iter%d*params*, (true) for modelparams every scored indiv" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dump_modelparams_separate_files", + "output": "dump modelparams separate files config.toml: Whether to append (false) or have separate files, files like: individual_scored_id%d.iter%d*params*, (true) for modelparams every scored indiv" + }, + { + "prompt_type": "plain", + "instruction": ": What does dump_trans_timings do? : dump trans timings config.toml: Whether to dump every scored fold's timing and feature info to a *timings*.txt file " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dump_trans_timings. : dump trans timings config.toml: Whether to dump every scored fold's timing and feature info to a *timings*.txt file " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable detailed logs for timing and types of features produced: . 
: Set the dump trans timings config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump_trans_timings", + "output": "dump trans timings config.toml: Whether to dump every scored fold's timing and feature info to a *timings*.txt file " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump_trans_timings", + "output": "dump trans timings config.toml: Enable detailed logs for timing and types of features produced: Whether to dump every scored fold's timing and feature info to a *timings*.txt file " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dump trans timings", + "output": "dump trans timings config.toml: Enable detailed logs for timing and types of features produced: Whether to dump every scored fold's timing and feature info to a *timings*.txt file " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable detailed logs for timing and types of features produced: ", + "output": "dump trans timings config.toml: Enable detailed logs for timing and types of features produced: Whether to dump every scored fold's timing and feature info to a *timings*.txt file " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dump_trans_timings", + "output": "dump trans timings config.toml: Whether to dump every scored fold's timing and feature info to a *timings*.txt file " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dump_trans_timings", + "output": "dump trans timings config.toml: Enable detailed logs for timing and types of features produced: Whether to dump every scored fold's timing and feature info to a *timings*.txt file " + }, + { + 
"prompt_type": "plain", + "instruction": ": What does delete_preview_trans_timings do? : delete preview trans timings config.toml: whether to delete preview timings if wrote transformer timings" + }, + { + "prompt_type": "plain", + "instruction": ": Explain delete_preview_trans_timings. : delete preview trans timings config.toml: whether to delete preview timings if wrote transformer timings" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "delete_preview_trans_timings", + "output": "delete preview trans timings config.toml: whether to delete preview timings if wrote transformer timings" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "delete_preview_trans_timings", + "output": "delete preview trans timings config.toml: whether to delete preview timings if wrote transformer timings" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "delete preview trans timings", + "output": "delete preview trans timings config.toml: whether to delete preview timings if wrote transformer timings" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "delete preview trans timings config.toml: whether to delete preview timings if wrote transformer timings" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting delete_preview_trans_timings", + "output": "delete preview trans timings config.toml: whether to delete preview timings if wrote transformer timings" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting delete_preview_trans_timings", + "output": "delete preview trans timings config.toml: whether to delete preview timings if wrote transformer timings" + }, + { + 
"prompt_type": "plain", + "instruction": ": What does unsupervised_aggregator_n_exemplars do? : unsupervised aggregator n exemplars config.toml: Attempt to create at most this many exemplars (actual rows behaving like cluster centroids) for the Aggregator algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain unsupervised_aggregator_n_exemplars. : unsupervised aggregator n exemplars config.toml: Attempt to create at most this many exemplars (actual rows behaving like cluster centroids) for the Aggregator algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of exemplars for unsupervised Aggregator experiments: . : Set the unsupervised aggregator n exemplars config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "unsupervised_aggregator_n_exemplars", + "output": "unsupervised aggregator n exemplars config.toml: Attempt to create at most this many exemplars (actual rows behaving like cluster centroids) for the Aggregator algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "unsupervised_aggregator_n_exemplars", + "output": "unsupervised aggregator n exemplars config.toml: Max. number of exemplars for unsupervised Aggregator experiments: Attempt to create at most this many exemplars (actual rows behaving like cluster centroids) for the Aggregator algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "unsupervised aggregator n exemplars", + "output": "unsupervised aggregator n exemplars config.toml: Max. 
number of exemplars for unsupervised Aggregator experiments: Attempt to create at most this many exemplars (actual rows behaving like cluster centroids) for the Aggregator algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of exemplars for unsupervised Aggregator experiments: ", + "output": "unsupervised aggregator n exemplars config.toml: Max. number of exemplars for unsupervised Aggregator experiments: Attempt to create at most this many exemplars (actual rows behaving like cluster centroids) for the Aggregator algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting unsupervised_aggregator_n_exemplars", + "output": "unsupervised aggregator n exemplars config.toml: Attempt to create at most this many exemplars (actual rows behaving like cluster centroids) for the Aggregator algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting unsupervised_aggregator_n_exemplars", + "output": "unsupervised aggregator n exemplars config.toml: Max. number of exemplars for unsupervised Aggregator experiments: Attempt to create at most this many exemplars (actual rows behaving like cluster centroids) for the Aggregator algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "plain", + "instruction": ": What does unsupervised_clustering_min_clusters do? : unsupervised clustering min clusters config.toml: Attempt to create at least this many clusters for clustering algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain unsupervised_clustering_min_clusters. 
: unsupervised clustering min clusters config.toml: Attempt to create at least this many clusters for clustering algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Min. number of clusters for unsupervised clustering experiments: . : Set the unsupervised clustering min clusters config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "unsupervised_clustering_min_clusters", + "output": "unsupervised clustering min clusters config.toml: Attempt to create at least this many clusters for clustering algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "unsupervised_clustering_min_clusters", + "output": "unsupervised clustering min clusters config.toml: Min. number of clusters for unsupervised clustering experiments: Attempt to create at least this many clusters for clustering algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "unsupervised clustering min clusters", + "output": "unsupervised clustering min clusters config.toml: Min. number of clusters for unsupervised clustering experiments: Attempt to create at least this many clusters for clustering algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Min. number of clusters for unsupervised clustering experiments: ", + "output": "unsupervised clustering min clusters config.toml: Min. number of clusters for unsupervised clustering experiments: Attempt to create at least this many clusters for clustering algorithm in unsupervised experiment mode. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting unsupervised_clustering_min_clusters", + "output": "unsupervised clustering min clusters config.toml: Attempt to create at least this many clusters for clustering algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting unsupervised_clustering_min_clusters", + "output": "unsupervised clustering min clusters config.toml: Min. number of clusters for unsupervised clustering experiments: Attempt to create at least this many clusters for clustering algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "plain", + "instruction": ": What does unsupervised_clustering_max_clusters do? : unsupervised clustering max clusters config.toml: Attempt to create no more than this many clusters for clustering algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain unsupervised_clustering_max_clusters. : unsupervised clustering max clusters config.toml: Attempt to create no more than this many clusters for clustering algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max. number of clusters for unsupervised clustering experiments: . : Set the unsupervised clustering max clusters config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "unsupervised_clustering_max_clusters", + "output": "unsupervised clustering max clusters config.toml: Attempt to create no more than this many clusters for clustering algorithm in unsupervised experiment mode. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "unsupervised_clustering_max_clusters", + "output": "unsupervised clustering max clusters config.toml: Max. number of clusters for unsupervised clustering experiments: Attempt to create no more than this many clusters for clustering algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "unsupervised clustering max clusters", + "output": "unsupervised clustering max clusters config.toml: Max. number of clusters for unsupervised clustering experiments: Attempt to create no more than this many clusters for clustering algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of clusters for unsupervised clustering experiments: ", + "output": "unsupervised clustering max clusters config.toml: Max. number of clusters for unsupervised clustering experiments: Attempt to create no more than this many clusters for clustering algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting unsupervised_clustering_max_clusters", + "output": "unsupervised clustering max clusters config.toml: Attempt to create no more than this many clusters for clustering algorithm in unsupervised experiment mode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting unsupervised_clustering_max_clusters", + "output": "unsupervised clustering max clusters config.toml: Max. number of clusters for unsupervised clustering experiments: Attempt to create no more than this many clusters for clustering algorithm in unsupervised experiment mode. 
" + }, + { + "prompt_type": "plain", + "instruction": ": What does wizard_deployment do? : wizard deployment config.toml: Global preset of deployment option for Experiment Wizard. Set to non-empty string to enable.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain wizard_deployment. : wizard deployment config.toml: Global preset of deployment option for Experiment Wizard. Set to non-empty string to enable.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_deployment", + "output": "wizard deployment config.toml: Global preset of deployment option for Experiment Wizard. Set to non-empty string to enable.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_deployment", + "output": "wizard deployment config.toml: Global preset of deployment option for Experiment Wizard. Set to non-empty string to enable.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard deployment", + "output": "wizard deployment config.toml: Global preset of deployment option for Experiment Wizard. Set to non-empty string to enable.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Global preset of deployment option for Experiment Wizard. Set to non-empty string to enable.: ", + "output": "wizard deployment config.toml: Global preset of deployment option for Experiment Wizard. Set to non-empty string to enable.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting wizard_deployment", + "output": "wizard deployment config.toml: Global preset of deployment option for Experiment Wizard. 
Set to non-empty string to enable.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting wizard_deployment", + "output": "wizard deployment config.toml: Global preset of deployment option for Experiment Wizard. Set to non-empty string to enable.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does wizard_repro_level do? : wizard repro level config.toml: Global preset of repro level option for Experiment Wizard. Set to 1, 2, 3 to enable.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain wizard_repro_level. : wizard repro level config.toml: Global preset of repro level option for Experiment Wizard. Set to 1, 2, 3 to enable.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_repro_level", + "output": "wizard repro level config.toml: Global preset of repro level option for Experiment Wizard. Set to 1, 2, 3 to enable.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_repro_level", + "output": "wizard repro level config.toml: Global preset of repro level option for Experiment Wizard. Set to 1, 2, 3 to enable.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard repro level", + "output": "wizard repro level config.toml: Global preset of repro level option for Experiment Wizard. Set to 1, 2, 3 to enable.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Global preset of repro level option for Experiment Wizard. Set to 1, 2, 3 to enable.: ", + "output": "wizard repro level config.toml: Global preset of repro level option for Experiment Wizard. 
Set to 1, 2, 3 to enable.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting wizard_repro_level", + "output": "wizard repro level config.toml: Global preset of repro level option for Experiment Wizard. Set to 1, 2, 3 to enable.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting wizard_repro_level", + "output": "wizard repro level config.toml: Global preset of repro level option for Experiment Wizard. Set to 1, 2, 3 to enable.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does wizard_sample_size do? : wizard sample size config.toml: Max. number of rows for experiment wizard dataset samples. 0 to disable sampling.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain wizard_sample_size. : wizard sample size config.toml: Max. number of rows for experiment wizard dataset samples. 0 to disable sampling.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_sample_size", + "output": "wizard sample size config.toml: Max. number of rows for experiment wizard dataset samples. 0 to disable sampling.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_sample_size", + "output": "wizard sample size config.toml: Max. number of rows for experiment wizard dataset samples. 0 to disable sampling.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard sample size", + "output": "wizard sample size config.toml: Max. number of rows for experiment wizard dataset samples. 0 to disable sampling.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max. number of rows for experiment wizard dataset samples. 
0 to disable sampling.: ", + "output": "wizard sample size config.toml: Max. number of rows for experiment wizard dataset samples. 0 to disable sampling.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting wizard_sample_size", + "output": "wizard sample size config.toml: Max. number of rows for experiment wizard dataset samples. 0 to disable sampling.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting wizard_sample_size", + "output": "wizard sample size config.toml: Max. number of rows for experiment wizard dataset samples. 0 to disable sampling.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does wizard_model do? : wizard model config.toml: Type of model for experiment wizard to compute variable importances and leakage checks.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain wizard_model. : wizard model config.toml: Type of model for experiment wizard to compute variable importances and leakage checks.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_model", + "output": "wizard model config.toml: Type of model for experiment wizard to compute variable importances and leakage checks.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_model", + "output": "wizard model config.toml: Type of model for experiment wizard to compute variable importances and leakage checks.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard model", + "output": "wizard model config.toml: Type of model for experiment wizard to compute variable importances and leakage checks.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", 
+ "input": "Type of model for experiment wizard to compute variable importances and leakage checks.: ", + "output": "wizard model config.toml: Type of model for experiment wizard to compute variable importances and leakage checks.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting wizard_model", + "output": "wizard model config.toml: Type of model for experiment wizard to compute variable importances and leakage checks.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting wizard_model", + "output": "wizard model config.toml: Type of model for experiment wizard to compute variable importances and leakage checks.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does wizard_max_cols do? : wizard max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constraint the # complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "plain", + "instruction": ": Explain wizard_max_cols. : wizard max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constraint the # complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_max_cols", + "output": "wizard max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constraint the # complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_max_cols", + "output": "wizard max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constraint the # complexity and the length of the Driverless AI's processes." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard max cols", + "output": "wizard max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constraint the # complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "wizard max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constraint the # complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting wizard_max_cols", + "output": "wizard max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constraint the # complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting wizard_max_cols", + "output": "wizard max cols config.toml: Maximum number of columns to start an experiment. This threshold exists to constraint the # complexity and the length of the Driverless AI's processes." + }, + { + "prompt_type": "plain", + "instruction": ": What does wizard_timeout_preview do? : wizard timeout preview config.toml: How many seconds to allow preview to take for Wizard." + }, + { + "prompt_type": "plain", + "instruction": ": Explain wizard_timeout_preview. : wizard timeout preview config.toml: How many seconds to allow preview to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_timeout_preview", + "output": "wizard timeout preview config.toml: How many seconds to allow preview to take for Wizard." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_timeout_preview", + "output": "wizard timeout preview config.toml: How many seconds to allow preview to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard timeout preview", + "output": "wizard timeout preview config.toml: How many seconds to allow preview to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "wizard timeout preview config.toml: How many seconds to allow preview to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting wizard_timeout_preview", + "output": "wizard timeout preview config.toml: How many seconds to allow preview to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting wizard_timeout_preview", + "output": "wizard timeout preview config.toml: How many seconds to allow preview to take for Wizard." + }, + { + "prompt_type": "plain", + "instruction": ": What does wizard_timeout_leakage do? : wizard timeout leakage config.toml: How many seconds to allow leakage detection to take for Wizard." + }, + { + "prompt_type": "plain", + "instruction": ": Explain wizard_timeout_leakage. : wizard timeout leakage config.toml: How many seconds to allow leakage detection to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_timeout_leakage", + "output": "wizard timeout leakage config.toml: How many seconds to allow leakage detection to take for Wizard." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_timeout_leakage", + "output": "wizard timeout leakage config.toml: How many seconds to allow leakage detection to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard timeout leakage", + "output": "wizard timeout leakage config.toml: How many seconds to allow leakage detection to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "wizard timeout leakage config.toml: How many seconds to allow leakage detection to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting wizard_timeout_leakage", + "output": "wizard timeout leakage config.toml: How many seconds to allow leakage detection to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting wizard_timeout_leakage", + "output": "wizard timeout leakage config.toml: How many seconds to allow leakage detection to take for Wizard." + }, + { + "prompt_type": "plain", + "instruction": ": What does wizard_timeout_dups do? : wizard timeout dups config.toml: How many seconds to allow duplicate row detection to take for Wizard." + }, + { + "prompt_type": "plain", + "instruction": ": Explain wizard_timeout_dups. : wizard timeout dups config.toml: How many seconds to allow duplicate row detection to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_timeout_dups", + "output": "wizard timeout dups config.toml: How many seconds to allow duplicate row detection to take for Wizard." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_timeout_dups", + "output": "wizard timeout dups config.toml: How many seconds to allow duplicate row detection to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard timeout dups", + "output": "wizard timeout dups config.toml: How many seconds to allow duplicate row detection to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "wizard timeout dups config.toml: How many seconds to allow duplicate row detection to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting wizard_timeout_dups", + "output": "wizard timeout dups config.toml: How many seconds to allow duplicate row detection to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting wizard_timeout_dups", + "output": "wizard timeout dups config.toml: How many seconds to allow duplicate row detection to take for Wizard." + }, + { + "prompt_type": "plain", + "instruction": ": What does wizard_timeout_varimp do? : wizard timeout varimp config.toml: How many seconds to allow variable importance calculation to take for Wizard." + }, + { + "prompt_type": "plain", + "instruction": ": Explain wizard_timeout_varimp. : wizard timeout varimp config.toml: How many seconds to allow variable importance calculation to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_timeout_varimp", + "output": "wizard timeout varimp config.toml: How many seconds to allow variable importance calculation to take for Wizard." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_timeout_varimp", + "output": "wizard timeout varimp config.toml: How many seconds to allow variable importance calculation to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard timeout varimp", + "output": "wizard timeout varimp config.toml: How many seconds to allow variable importance calculation to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "wizard timeout varimp config.toml: How many seconds to allow variable importance calculation to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting wizard_timeout_varimp", + "output": "wizard timeout varimp config.toml: How many seconds to allow variable importance calculation to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting wizard_timeout_varimp", + "output": "wizard timeout varimp config.toml: How many seconds to allow variable importance calculation to take for Wizard." + }, + { + "prompt_type": "plain", + "instruction": ": What does wizard_timeout_schema do? : wizard timeout schema config.toml: How many seconds to allow dataframe schema calculation to take for Wizard." + }, + { + "prompt_type": "plain", + "instruction": ": Explain wizard_timeout_schema. : wizard timeout schema config.toml: How many seconds to allow dataframe schema calculation to take for Wizard." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_timeout_schema", + "output": "wizard timeout schema config.toml: How many seconds to allow dataframe schema calculation to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard_timeout_schema", + "output": "wizard timeout schema config.toml: How many seconds to allow dataframe schema calculation to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "wizard timeout schema", + "output": "wizard timeout schema config.toml: How many seconds to allow dataframe schema calculation to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "wizard timeout schema config.toml: How many seconds to allow dataframe schema calculation to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting wizard_timeout_schema", + "output": "wizard timeout schema config.toml: How many seconds to allow dataframe schema calculation to take for Wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting wizard_timeout_schema", + "output": "wizard timeout schema config.toml: How many seconds to allow dataframe schema calculation to take for Wizard." + }, + { + "prompt_type": "plain", + "instruction": ": What does authentication_method do? : authentication method config.toml: authentication_method unvalidated : Accepts user id and password. Does not validate password. none: Does not ask for user id or password. Authenticated as admin. openid: Users OpenID Connect provider for authentication. See additional OpenID settings below. 
oidc: Renewed OpenID Connect authentication using authorization code flow. See additional OpenID settings below. pam: Accepts user id and password. Validates user with operating system. ldap: Accepts user id and password. Validates against an ldap server. Look for additional settings under LDAP settings. local: Accepts a user id and password. Validated against an htpasswd file provided in local_htpasswd_file. ibm_spectrum_conductor: Authenticate with IBM conductor auth api. tls_certificate: Authenticate with Driverless by providing a TLS certificate. jwt: Authenticate by JWT obtained from the request metadata. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain authentication_method. : authentication method config.toml: authentication_method unvalidated : Accepts user id and password. Does not validate password. none: Does not ask for user id or password. Authenticated as admin. openid: Uses OpenID Connect provider for authentication. See additional OpenID settings below. oidc: Renewed OpenID Connect authentication using authorization code flow. See additional OpenID settings below. pam: Accepts user id and password. Validates user with operating system. ldap: Accepts user id and password. Validates against an ldap server. Look for additional settings under LDAP settings. local: Accepts a user id and password. Validated against an htpasswd file provided in local_htpasswd_file. ibm_spectrum_conductor: Authenticate with IBM conductor auth api. tls_certificate: Authenticate with Driverless by providing a TLS certificate. jwt: Authenticate by JWT obtained from the request metadata. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "authentication_method", + "output": "authentication method config.toml: authentication_method unvalidated : Accepts user id and password. Does not validate password. none: Does not ask for user id or password. Authenticated as admin. 
openid: Uses OpenID Connect provider for authentication. See additional OpenID settings below. oidc: Renewed OpenID Connect authentication using authorization code flow. See additional OpenID settings below. pam: Accepts user id and password. Validates user with operating system. ldap: Accepts user id and password. Validates against an ldap server. Look for additional settings under LDAP settings. local: Accepts a user id and password. Validated against an htpasswd file provided in local_htpasswd_file. ibm_spectrum_conductor: Authenticate with IBM conductor auth api. tls_certificate: Authenticate with Driverless by providing a TLS certificate. jwt: Authenticate by JWT obtained from the request metadata. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "authentication_method", + "output": "authentication method config.toml: authentication_method unvalidated : Accepts user id and password. Does not validate password. none: Does not ask for user id or password. Authenticated as admin. openid: Uses OpenID Connect provider for authentication. See additional OpenID settings below. oidc: Renewed OpenID Connect authentication using authorization code flow. See additional OpenID settings below. pam: Accepts user id and password. Validates user with operating system. ldap: Accepts user id and password. Validates against an ldap server. Look for additional settings under LDAP settings. local: Accepts a user id and password. Validated against an htpasswd file provided in local_htpasswd_file. ibm_spectrum_conductor: Authenticate with IBM conductor auth api. tls_certificate: Authenticate with Driverless by providing a TLS certificate. jwt: Authenticate by JWT obtained from the request metadata. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "authentication method", + "output": "authentication method config.toml: authentication_method unvalidated : Accepts user id and password. Does not validate password. none: Does not ask for user id or password. Authenticated as admin. openid: Users OpenID Connect provider for authentication. See additional OpenID settings below. oidc: Renewed OpenID Connect authentication using authorization code flow. See additional OpenID settings below. pam: Accepts user id and password. Validates user with operating system. ldap: Accepts user id and password. Validates against an ldap server. Look for additional settings under LDAP settings. local: Accepts a user id and password. Validated against an htpasswd file provided in local_htpasswd_file. ibm_spectrum_conductor: Authenticate with IBM conductor auth api. tls_certificate: Authenticate with Driverless by providing a TLS certificate. jwt: Authenticate by JWT obtained from the request metadata. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "authentication method config.toml: authentication_method unvalidated : Accepts user id and password. Does not validate password. none: Does not ask for user id or password. Authenticated as admin. openid: Users OpenID Connect provider for authentication. See additional OpenID settings below. oidc: Renewed OpenID Connect authentication using authorization code flow. See additional OpenID settings below. pam: Accepts user id and password. Validates user with operating system. ldap: Accepts user id and password. Validates against an ldap server. Look for additional settings under LDAP settings. local: Accepts a user id and password. Validated against an htpasswd file provided in local_htpasswd_file. ibm_spectrum_conductor: Authenticate with IBM conductor auth api. 
tls_certificate: Authenticate with Driverless by providing a TLS certificate. jwt: Authenticate by JWT obtained from the request metadata. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting authentication_method", + "output": "authentication method config.toml: authentication_method unvalidated : Accepts user id and password. Does not validate password. none: Does not ask for user id or password. Authenticated as admin. openid: Uses OpenID Connect provider for authentication. See additional OpenID settings below. oidc: Renewed OpenID Connect authentication using authorization code flow. See additional OpenID settings below. pam: Accepts user id and password. Validates user with operating system. ldap: Accepts user id and password. Validates against an ldap server. Look for additional settings under LDAP settings. local: Accepts a user id and password. Validated against an htpasswd file provided in local_htpasswd_file. ibm_spectrum_conductor: Authenticate with IBM conductor auth api. tls_certificate: Authenticate with Driverless by providing a TLS certificate. jwt: Authenticate by JWT obtained from the request metadata. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting authentication_method", + "output": "authentication method config.toml: authentication_method unvalidated : Accepts user id and password. Does not validate password. none: Does not ask for user id or password. Authenticated as admin. openid: Uses OpenID Connect provider for authentication. See additional OpenID settings below. oidc: Renewed OpenID Connect authentication using authorization code flow. See additional OpenID settings below. pam: Accepts user id and password. Validates user with operating system. ldap: Accepts user id and password. Validates against an ldap server. Look for additional settings under LDAP settings. local: Accepts a user id and password. 
Validated against an htpasswd file provided in local_htpasswd_file. ibm_spectrum_conductor: Authenticate with IBM conductor auth api. tls_certificate: Authenticate with Driverless by providing a TLS certificate. jwt: Authenticate by JWT obtained from the request metadata. " + }, + { + "prompt_type": "plain", + "instruction": ": What does additional_authentication_methods do? : additional authentication methods config.toml: Additional authentication methods that will be enabled for the clients. Login forms for each method will be available on the ``/login/`` path. Comma separated list." + }, + { + "prompt_type": "plain", + "instruction": ": Explain additional_authentication_methods. : additional authentication methods config.toml: Additional authentication methods that will be enabled for the clients. Login forms for each method will be available on the ``/login/`` path. Comma separated list." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "additional_authentication_methods", + "output": "additional authentication methods config.toml: Additional authentication methods that will be enabled for the clients. Login forms for each method will be available on the ``/login/`` path. Comma separated list." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "additional_authentication_methods", + "output": "additional authentication methods config.toml: Additional authentication methods that will be enabled for the clients. Login forms for each method will be available on the ``/login/`` path. Comma separated list." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "additional authentication methods", + "output": "additional authentication methods config.toml: Additional authentication methods that will be enabled for for the clients.Login forms for each method will be available on the``/login/`` path.Comma separated list." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "additional authentication methods config.toml: Additional authentication methods that will be enabled for for the clients.Login forms for each method will be available on the``/login/`` path.Comma separated list." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting additional_authentication_methods", + "output": "additional authentication methods config.toml: Additional authentication methods that will be enabled for for the clients.Login forms for each method will be available on the``/login/`` path.Comma separated list." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting additional_authentication_methods", + "output": "additional authentication methods config.toml: Additional authentication methods that will be enabled for for the clients.Login forms for each method will be available on the``/login/`` path.Comma separated list." + }, + { + "prompt_type": "plain", + "instruction": ": What does authentication_default_timeout_hours do? : authentication default timeout hours config.toml: The default amount of time in hours before a user is signed out and must log in again. This setting is used when a default timeout value is not provided by ``authentication_method``." + }, + { + "prompt_type": "plain", + "instruction": ": Explain authentication_default_timeout_hours. 
: authentication default timeout hours config.toml: The default amount of time in hours before a user is signed out and must log in again. This setting is used when a default timeout value is not provided by ``authentication_method``." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "authentication_default_timeout_hours", + "output": "authentication default timeout hours config.toml: The default amount of time in hours before a user is signed out and must log in again. This setting is used when a default timeout value is not provided by ``authentication_method``." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "authentication_default_timeout_hours", + "output": "authentication default timeout hours config.toml: The default amount of time in hours before a user is signed out and must log in again. This setting is used when a default timeout value is not provided by ``authentication_method``." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "authentication default timeout hours", + "output": "authentication default timeout hours config.toml: The default amount of time in hours before a user is signed out and must log in again. This setting is used when a default timeout value is not provided by ``authentication_method``." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "authentication default timeout hours config.toml: The default amount of time in hours before a user is signed out and must log in again. This setting is used when a default timeout value is not provided by ``authentication_method``." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting authentication_default_timeout_hours", + "output": "authentication default timeout hours config.toml: The default amount of time in hours before a user is signed out and must log in again. This setting is used when a default timeout value is not provided by ``authentication_method``." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting authentication_default_timeout_hours", + "output": "authentication default timeout hours config.toml: The default amount of time in hours before a user is signed out and must log in again. This setting is used when a default timeout value is not provided by ``authentication_method``." + }, + { + "prompt_type": "plain", + "instruction": ": What does authentication_gui_polling_prolongs_session do? : authentication gui polling prolongs session config.toml: When enabled, the user's session is automatically prolonged, even when they are not interacting directly with the application." + }, + { + "prompt_type": "plain", + "instruction": ": Explain authentication_gui_polling_prolongs_session. : authentication gui polling prolongs session config.toml: When enabled, the user's session is automatically prolonged, even when they are not interacting directly with the application." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "authentication_gui_polling_prolongs_session", + "output": "authentication gui polling prolongs session config.toml: When enabled, the user's session is automatically prolonged, even when they are not interacting directly with the application." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "authentication_gui_polling_prolongs_session", + "output": "authentication gui polling prolongs session config.toml: When enabled, the user's session is automatically prolonged, even when they are not interacting directly with the application." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "authentication gui polling prolongs session", + "output": "authentication gui polling prolongs session config.toml: When enabled, the user's session is automatically prolonged, even when they are not interacting directly with the application." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "authentication gui polling prolongs session config.toml: When enabled, the user's session is automatically prolonged, even when they are not interacting directly with the application." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting authentication_gui_polling_prolongs_session", + "output": "authentication gui polling prolongs session config.toml: When enabled, the user's session is automatically prolonged, even when they are not interacting directly with the application." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting authentication_gui_polling_prolongs_session", + "output": "authentication gui polling prolongs session config.toml: When enabled, the user's session is automatically prolonged, even when they are not interacting directly with the application." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_provider_base_uri do? 
: auth openid provider base uri config.toml: OpenID Connect Settings: Refer to the OpenID Connect Basic Client Implementation Guide for details on how OpenID authentication flow works https://openid.net/specs/openid-connect-basic-1_0.html base server URI to the OpenID Provider server (ex: https://oidp.ourdomain.com" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_provider_base_uri. : auth openid provider base uri config.toml: OpenID Connect Settings: Refer to the OpenID Connect Basic Client Implementation Guide for details on how OpenID authentication flow works https://openid.net/specs/openid-connect-basic-1_0.html base server URI to the OpenID Provider server (ex: https://oidp.ourdomain.com" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_provider_base_uri", + "output": "auth openid provider base uri config.toml: OpenID Connect Settings: Refer to the OpenID Connect Basic Client Implementation Guide for details on how OpenID authentication flow works https://openid.net/specs/openid-connect-basic-1_0.html base server URI to the OpenID Provider server (ex: https://oidp.ourdomain.com" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_provider_base_uri", + "output": "auth openid provider base uri config.toml: OpenID Connect Settings: Refer to the OpenID Connect Basic Client Implementation Guide for details on how OpenID authentication flow works https://openid.net/specs/openid-connect-basic-1_0.html base server URI to the OpenID Provider server (ex: https://oidp.ourdomain.com" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid provider base uri", + "output": "auth openid provider base uri config.toml: OpenID Connect Settings: Refer to the OpenID Connect Basic Client Implementation 
Guide for details on how OpenID authentication flow works https://openid.net/specs/openid-connect-basic-1_0.html base server URI to the OpenID Provider server (ex: https://oidp.ourdomain.com" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid provider base uri config.toml: OpenID Connect Settings: Refer to the OpenID Connect Basic Client Implementation Guide for details on how OpenID authentication flow works https://openid.net/specs/openid-connect-basic-1_0.html base server URI to the OpenID Provider server (ex: https://oidp.ourdomain.com" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_provider_base_uri", + "output": "auth openid provider base uri config.toml: OpenID Connect Settings: Refer to the OpenID Connect Basic Client Implementation Guide for details on how OpenID authentication flow works https://openid.net/specs/openid-connect-basic-1_0.html base server URI to the OpenID Provider server (ex: https://oidp.ourdomain.com" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_provider_base_uri", + "output": "auth openid provider base uri config.toml: OpenID Connect Settings: Refer to the OpenID Connect Basic Client Implementation Guide for details on how OpenID authentication flow works https://openid.net/specs/openid-connect-basic-1_0.html base server URI to the OpenID Provider server (ex: https://oidp.ourdomain.com" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_configuration_uri do? : auth openid configuration uri config.toml: URI to pull OpenID config data from (you can extract most of required OpenID config from this url) usually located at: /auth/realms/master/.well-known/openid-configuration" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_configuration_uri. 
: auth openid configuration uri config.toml: URI to pull OpenID config data from (you can extract most of required OpenID config from this url) usually located at: /auth/realms/master/.well-known/openid-configuration" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_configuration_uri", + "output": "auth openid configuration uri config.toml: URI to pull OpenID config data from (you can extract most of required OpenID config from this url) usually located at: /auth/realms/master/.well-known/openid-configuration" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_configuration_uri", + "output": "auth openid configuration uri config.toml: URI to pull OpenID config data from (you can extract most of required OpenID config from this url) usually located at: /auth/realms/master/.well-known/openid-configuration" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid configuration uri", + "output": "auth openid configuration uri config.toml: URI to pull OpenID config data from (you can extract most of required OpenID config from this url) usually located at: /auth/realms/master/.well-known/openid-configuration" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid configuration uri config.toml: URI to pull OpenID config data from (you can extract most of required OpenID config from this url) usually located at: /auth/realms/master/.well-known/openid-configuration" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_configuration_uri", + "output": "auth openid configuration uri config.toml: URI to pull OpenID config data from (you can extract most of 
required OpenID config from this url) usually located at: /auth/realms/master/.well-known/openid-configuration" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_configuration_uri", + "output": "auth openid configuration uri config.toml: URI to pull OpenID config data from (you can extract most of required OpenID config from this url) usually located at: /auth/realms/master/.well-known/openid-configuration" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_auth_uri do? : auth openid auth uri config.toml: URI to start authentication flow" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_auth_uri. : auth openid auth uri config.toml: URI to start authentication flow" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_auth_uri", + "output": "auth openid auth uri config.toml: URI to start authentication flow" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_auth_uri", + "output": "auth openid auth uri config.toml: URI to start authentication flow" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid auth uri", + "output": "auth openid auth uri config.toml: URI to start authentication flow" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid auth uri config.toml: URI to start authentication flow" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_auth_uri", + "output": "auth openid auth uri config.toml: URI to start authentication flow" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of 
the expert setting auth_openid_auth_uri", + "output": "auth openid auth uri config.toml: URI to start authentication flow" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_token_uri do? : auth openid token uri config.toml: URI to make request for token after callback from OpenID server was received" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_token_uri. : auth openid token uri config.toml: URI to make request for token after callback from OpenID server was received" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_token_uri", + "output": "auth openid token uri config.toml: URI to make request for token after callback from OpenID server was received" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_token_uri", + "output": "auth openid token uri config.toml: URI to make request for token after callback from OpenID server was received" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid token uri", + "output": "auth openid token uri config.toml: URI to make request for token after callback from OpenID server was received" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid token uri config.toml: URI to make request for token after callback from OpenID server was received" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_token_uri", + "output": "auth openid token uri config.toml: URI to make request for token after callback from OpenID server was received" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting 
auth_openid_token_uri", + "output": "auth openid token uri config.toml: URI to make request for token after callback from OpenID server was received" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_userinfo_uri do? : auth openid userinfo uri config.toml: URI to get user information once access_token has been acquired (ex: list of groups user belongs to will be provided here)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_userinfo_uri. : auth openid userinfo uri config.toml: URI to get user information once access_token has been acquired (ex: list of groups user belongs to will be provided here)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_userinfo_uri", + "output": "auth openid userinfo uri config.toml: URI to get user information once access_token has been acquired (ex: list of groups user belongs to will be provided here)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_userinfo_uri", + "output": "auth openid userinfo uri config.toml: URI to get user information once access_token has been acquired (ex: list of groups user belongs to will be provided here)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid userinfo uri", + "output": "auth openid userinfo uri config.toml: URI to get user information once access_token has been acquired (ex: list of groups user belongs to will be provided here)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid userinfo uri config.toml: URI to get user information once access_token has been acquired (ex: list of groups user belongs to will be provided here)" + }, + { + "prompt_type": "human_bot", + 
"instruction": "Provide a short explanation of the expert setting auth_openid_userinfo_uri", + "output": "auth openid userinfo uri config.toml: URI to get user information once access_token has been acquired (ex: list of groups user belongs to will be provided here)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_userinfo_uri", + "output": "auth openid userinfo uri config.toml: URI to get user information once access_token has been acquired (ex: list of groups user belongs to will be provided here)" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_logout_uri do? : auth openid logout uri config.toml: URI to logout user" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_logout_uri. : auth openid logout uri config.toml: URI to logout user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_logout_uri", + "output": "auth openid logout uri config.toml: URI to logout user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_logout_uri", + "output": "auth openid logout uri config.toml: URI to logout user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid logout uri", + "output": "auth openid logout uri config.toml: URI to logout user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid logout uri config.toml: URI to logout user" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_logout_uri", + "output": "auth openid logout uri config.toml: URI to logout user" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide 
a detailed explanation of the expert setting auth_openid_logout_uri", + "output": "auth openid logout uri config.toml: URI to logout user" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_redirect_uri do? : auth openid redirect uri config.toml: callback URI that OpenID provider will use to send 'authentication_code' This is OpenID callback endpoint in Driverless AI. Most OpenID providers need this to be HTTPs. (ex. https://driverless.ourdomain.com/openid/callback)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_redirect_uri. : auth openid redirect uri config.toml: callback URI that OpenID provider will use to send 'authentication_code' This is OpenID callback endpoint in Driverless AI. Most OpenID providers need this to be HTTPs. (ex. https://driverless.ourdomain.com/openid/callback)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_redirect_uri", + "output": "auth openid redirect uri config.toml: callback URI that OpenID provider will use to send 'authentication_code' This is OpenID callback endpoint in Driverless AI. Most OpenID providers need this to be HTTPs. (ex. https://driverless.ourdomain.com/openid/callback)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_redirect_uri", + "output": "auth openid redirect uri config.toml: callback URI that OpenID provider will use to send 'authentication_code' This is OpenID callback endpoint in Driverless AI. Most OpenID providers need this to be HTTPs. (ex. 
https://driverless.ourdomain.com/openid/callback)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid redirect uri", + "output": "auth openid redirect uri config.toml: callback URI that OpenID provider will use to send 'authentication_code' This is OpenID callback endpoint in Driverless AI. Most OpenID providers need this to be HTTPs. (ex. https://driverless.ourdomain.com/openid/callback)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid redirect uri config.toml: callback URI that OpenID provider will use to send 'authentication_code' This is OpenID callback endpoint in Driverless AI. Most OpenID providers need this to be HTTPs. (ex. https://driverless.ourdomain.com/openid/callback)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_redirect_uri", + "output": "auth openid redirect uri config.toml: callback URI that OpenID provider will use to send 'authentication_code' This is OpenID callback endpoint in Driverless AI. Most OpenID providers need this to be HTTPs. (ex. https://driverless.ourdomain.com/openid/callback)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_redirect_uri", + "output": "auth openid redirect uri config.toml: callback URI that OpenID provider will use to send 'authentication_code' This is OpenID callback endpoint in Driverless AI. Most OpenID providers need this to be HTTPs. (ex. https://driverless.ourdomain.com/openid/callback)" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_grant_type do? 
: auth openid grant type config.toml: OAuth2 grant type (usually authorization_code for OpenID, can be access_token also)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_grant_type. : auth openid grant type config.toml: OAuth2 grant type (usually authorization_code for OpenID, can be access_token also)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_grant_type", + "output": "auth openid grant type config.toml: OAuth2 grant type (usually authorization_code for OpenID, can be access_token also)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_grant_type", + "output": "auth openid grant type config.toml: OAuth2 grant type (usually authorization_code for OpenID, can be access_token also)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid grant type", + "output": "auth openid grant type config.toml: OAuth2 grant type (usually authorization_code for OpenID, can be access_token also)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid grant type config.toml: OAuth2 grant type (usually authorization_code for OpenID, can be access_token also)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_grant_type", + "output": "auth openid grant type config.toml: OAuth2 grant type (usually authorization_code for OpenID, can be access_token also)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_grant_type", + "output": "auth openid grant type config.toml: OAuth2 grant type (usually authorization_code for OpenID, can be access_token also)" + }, + { + 
"prompt_type": "plain", + "instruction": ": What does auth_openid_response_type do? : auth openid response type config.toml: OAuth2 response type (usually code)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_response_type. : auth openid response type config.toml: OAuth2 response type (usually code)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_response_type", + "output": "auth openid response type config.toml: OAuth2 response type (usually code)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_response_type", + "output": "auth openid response type config.toml: OAuth2 response type (usually code)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid response type", + "output": "auth openid response type config.toml: OAuth2 response type (usually code)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid response type config.toml: OAuth2 response type (usually code)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_response_type", + "output": "auth openid response type config.toml: OAuth2 response type (usually code)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_response_type", + "output": "auth openid response type config.toml: OAuth2 response type (usually code)" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_client_id do? : auth openid client id config.toml: Client ID registered with OpenID provider" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_client_id. 
: auth openid client id config.toml: Client ID registered with OpenID provider" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_client_id", + "output": "auth openid client id config.toml: Client ID registered with OpenID provider" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_client_id", + "output": "auth openid client id config.toml: Client ID registered with OpenID provider" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid client id", + "output": "auth openid client id config.toml: Client ID registered with OpenID provider" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid client id config.toml: Client ID registered with OpenID provider" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_client_id", + "output": "auth openid client id config.toml: Client ID registered with OpenID provider" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_client_id", + "output": "auth openid client id config.toml: Client ID registered with OpenID provider" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_client_secret do? : auth openid client secret config.toml: Client secret provided by OpenID provider when registering Client ID" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_client_secret. 
: auth openid client secret config.toml: Client secret provided by OpenID provider when registering Client ID" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_client_secret", + "output": "auth openid client secret config.toml: Client secret provided by OpenID provider when registering Client ID" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_client_secret", + "output": "auth openid client secret config.toml: Client secret provided by OpenID provider when registering Client ID" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid client secret", + "output": "auth openid client secret config.toml: Client secret provided by OpenID provider when registering Client ID" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid client secret config.toml: Client secret provided by OpenID provider when registering Client ID" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_client_secret", + "output": "auth openid client secret config.toml: Client secret provided by OpenID provider when registering Client ID" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_client_secret", + "output": "auth openid client secret config.toml: Client secret provided by OpenID provider when registering Client ID" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_scope do? : auth openid scope config.toml: Scope of info (usually openid). 
Can be list of more than one, space delimited, possible values listed at https://openid.net/specs/openid-connect-basic-1_0.html#Scopes " + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_scope. : auth openid scope config.toml: Scope of info (usually openid). Can be list of more than one, space delimited, possible values listed at https://openid.net/specs/openid-connect-basic-1_0.html#Scopes " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_scope", + "output": "auth openid scope config.toml: Scope of info (usually openid). Can be list of more than one, space delimited, possible values listed at https://openid.net/specs/openid-connect-basic-1_0.html#Scopes " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_scope", + "output": "auth openid scope config.toml: Scope of info (usually openid). Can be list of more than one, space delimited, possible values listed at https://openid.net/specs/openid-connect-basic-1_0.html#Scopes " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid scope", + "output": "auth openid scope config.toml: Scope of info (usually openid). Can be list of more than one, space delimited, possible values listed at https://openid.net/specs/openid-connect-basic-1_0.html#Scopes " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid scope config.toml: Scope of info (usually openid). 
Can be list of more than one, space delimited, possible values listed at https://openid.net/specs/openid-connect-basic-1_0.html#Scopes " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_scope", + "output": "auth openid scope config.toml: Scope of info (usually openid). Can be list of more than one, space delimited, possible values listed at https://openid.net/specs/openid-connect-basic-1_0.html#Scopes " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_scope", + "output": "auth openid scope config.toml: Scope of info (usually openid). Can be list of more than one, space delimited, possible values listed at https://openid.net/specs/openid-connect-basic-1_0.html#Scopes " + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_userinfo_auth_key do? : auth openid userinfo auth key config.toml: What key in user_info JSON should we check to authorize user" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_userinfo_auth_key. 
: auth openid userinfo auth key config.toml: What key in user_info JSON should we check to authorize user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_userinfo_auth_key", + "output": "auth openid userinfo auth key config.toml: What key in user_info JSON should we check to authorize user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_userinfo_auth_key", + "output": "auth openid userinfo auth key config.toml: What key in user_info JSON should we check to authorize user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid userinfo auth key", + "output": "auth openid userinfo auth key config.toml: What key in user_info JSON should we check to authorize user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid userinfo auth key config.toml: What key in user_info JSON should we check to authorize user" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_userinfo_auth_key", + "output": "auth openid userinfo auth key config.toml: What key in user_info JSON should we check to authorize user" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_userinfo_auth_key", + "output": "auth openid userinfo auth key config.toml: What key in user_info JSON should we check to authorize user" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_userinfo_auth_value do? 
: auth openid userinfo auth value config.toml: What value should the key have in user_info JSON in order to authorize user" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_userinfo_auth_value. : auth openid userinfo auth value config.toml: What value should the key have in user_info JSON in order to authorize user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_userinfo_auth_value", + "output": "auth openid userinfo auth value config.toml: What value should the key have in user_info JSON in order to authorize user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_userinfo_auth_value", + "output": "auth openid userinfo auth value config.toml: What value should the key have in user_info JSON in order to authorize user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid userinfo auth value", + "output": "auth openid userinfo auth value config.toml: What value should the key have in user_info JSON in order to authorize user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid userinfo auth value config.toml: What value should the key have in user_info JSON in order to authorize user" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_userinfo_auth_value", + "output": "auth openid userinfo auth value config.toml: What value should the key have in user_info JSON in order to authorize user" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_userinfo_auth_value", + "output": "auth openid userinfo auth value config.toml: What value should the key 
have in user_info JSON in order to authorize user" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_userinfo_username_key do? : auth openid userinfo username key config.toml: Key that specifies username in user_info JSON (we will use the value of this key as username in Driverless AI)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_userinfo_username_key. : auth openid userinfo username key config.toml: Key that specifies username in user_info JSON (we will use the value of this key as username in Driverless AI)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_userinfo_username_key", + "output": "auth openid userinfo username key config.toml: Key that specifies username in user_info JSON (we will use the value of this key as username in Driverless AI)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_userinfo_username_key", + "output": "auth openid userinfo username key config.toml: Key that specifies username in user_info JSON (we will use the value of this key as username in Driverless AI)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid userinfo username key", + "output": "auth openid userinfo username key config.toml: Key that specifies username in user_info JSON (we will use the value of this key as username in Driverless AI)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid userinfo username key config.toml: Key that specifies username in user_info JSON (we will use the value of this key as username in Driverless AI)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting 
auth_openid_userinfo_username_key", + "output": "auth openid userinfo username key config.toml: Key that specifies username in user_info JSON (we will use the value of this key as username in Driverless AI)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_userinfo_username_key", + "output": "auth openid userinfo username key config.toml: Key that specifies username in user_info JSON (we will use the value of this key as username in Driverless AI)" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_urlencode_quote_via do? : auth openid urlencode quote via config.toml: Quote method from urllib.parse used to encode payload dict in Authentication Request" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_urlencode_quote_via. : auth openid urlencode quote via config.toml: Quote method from urllib.parse used to encode payload dict in Authentication Request" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_urlencode_quote_via", + "output": "auth openid urlencode quote via config.toml: Quote method from urllib.parse used to encode payload dict in Authentication Request" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_urlencode_quote_via", + "output": "auth openid urlencode quote via config.toml: Quote method from urllib.parse used to encode payload dict in Authentication Request" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid urlencode quote via", + "output": "auth openid urlencode quote via config.toml: Quote method from urllib.parse used to encode payload dict in Authentication Request" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for 
Driverless AI", + "input": "", + "output": "auth openid urlencode quote via config.toml: Quote method from urllib.parse used to encode payload dict in Authentication Request" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_urlencode_quote_via", + "output": "auth openid urlencode quote via config.toml: Quote method from urllib.parse used to encode payload dict in Authentication Request" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_urlencode_quote_via", + "output": "auth openid urlencode quote via config.toml: Quote method from urllib.parse used to encode payload dict in Authentication Request" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_access_token_expiry_key do? : auth openid access token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_access_token_expiry_key. 
: auth openid access token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_access_token_expiry_key", + "output": "auth openid access token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_access_token_expiry_key", + "output": "auth openid access token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid access token expiry key", + "output": "auth openid access token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid access token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_access_token_expiry_key", + "output": "auth openid access token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_access_token_expiry_key", + "output": "auth openid access token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_refresh_token_expiry_key do? 
: auth openid refresh token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_refresh_token_expiry_key. : auth openid refresh token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_refresh_token_expiry_key", + "output": "auth openid refresh token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_refresh_token_expiry_key", + "output": "auth openid refresh token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid refresh token expiry key", + "output": "auth openid refresh token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid refresh token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_refresh_token_expiry_key", + "output": "auth openid refresh token expiry key config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_refresh_token_expiry_key", + "output": "auth openid refresh token expiry key 
config.toml: Key in Token Response JSON that holds the value for access token expiry" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_token_expiration_secs do? : auth openid token expiration secs config.toml: Expiration time in seconds for access token" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_token_expiration_secs. : auth openid token expiration secs config.toml: Expiration time in seconds for access token" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_token_expiration_secs", + "output": "auth openid token expiration secs config.toml: Expiration time in seconds for access token" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_token_expiration_secs", + "output": "auth openid token expiration secs config.toml: Expiration time in seconds for access token" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid token expiration secs", + "output": "auth openid token expiration secs config.toml: Expiration time in seconds for access token" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid token expiration secs config.toml: Expiration time in seconds for access token" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_token_expiration_secs", + "output": "auth openid token expiration secs config.toml: Expiration time in seconds for access token" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_token_expiration_secs", + "output": "auth openid token expiration secs config.toml: Expiration time in seconds for 
access token" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_use_objectpath_match do? : auth openid use objectpath match config.toml: Enables advanced matching for OpenID Connect authentication. When enabled ObjectPath () expression is used to evaluate the user identity. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_use_objectpath_match. : auth openid use objectpath match config.toml: Enables advanced matching for OpenID Connect authentication. When enabled ObjectPath () expression is used to evaluate the user identity. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_use_objectpath_match", + "output": "auth openid use objectpath match config.toml: Enables advanced matching for OpenID Connect authentication. When enabled ObjectPath () expression is used to evaluate the user identity. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_use_objectpath_match", + "output": "auth openid use objectpath match config.toml: Enables advanced matching for OpenID Connect authentication. When enabled ObjectPath () expression is used to evaluate the user identity. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid use objectpath match", + "output": "auth openid use objectpath match config.toml: Enables advanced matching for OpenID Connect authentication. When enabled ObjectPath () expression is used to evaluate the user identity. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid use objectpath match config.toml: Enables advanced matching for OpenID Connect authentication. When enabled ObjectPath () expression is used to evaluate the user identity. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_use_objectpath_match", + "output": "auth openid use objectpath match config.toml: Enables advanced matching for OpenID Connect authentication. When enabled ObjectPath () expression is used to evaluate the user identity. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_use_objectpath_match", + "output": "auth openid use objectpath match config.toml: Enables advanced matching for OpenID Connect authentication. When enabled ObjectPath () expression is used to evaluate the user identity. " + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_use_objectpath_expression do? : auth openid use objectpath expression config.toml: ObjectPath () expression that will be used to evaluate whether user is allowed to login into Driverless. Any expression that evaluates to True means user is allowed to log in. Examples: Simple claim equality: `$.our_claim is \"our_value\"` List of claims contains required value: `\"expected_role\" in @.roles` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_use_objectpath_expression. : auth openid use objectpath expression config.toml: ObjectPath () expression that will be used to evaluate whether user is allowed to login into Driverless. Any expression that evaluates to True means user is allowed to log in. Examples: Simple claim equality: `$.our_claim is \"our_value\"` List of claims contains required value: `\"expected_role\" in @.roles` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_use_objectpath_expression", + "output": "auth openid use objectpath expression config.toml: ObjectPath () expression that will be used to evaluate whether user is allowed to login into Driverless. 
Any expression that evaluates to True means user is allowed to log in. Examples: Simple claim equality: `$.our_claim is \"our_value\"` List of claims contains required value: `\"expected_role\" in @.roles` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_use_objectpath_expression", + "output": "auth openid use objectpath expression config.toml: ObjectPath () expression that will be used to evaluate whether user is allowed to login into Driverless. Any expression that evaluates to True means user is allowed to log in. Examples: Simple claim equality: `$.our_claim is \"our_value\"` List of claims contains required value: `\"expected_role\" in @.roles` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid use objectpath expression", + "output": "auth openid use objectpath expression config.toml: ObjectPath () expression that will be used to evaluate whether user is allowed to login into Driverless. Any expression that evaluates to True means user is allowed to log in. Examples: Simple claim equality: `$.our_claim is \"our_value\"` List of claims contains required value: `\"expected_role\" in @.roles` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid use objectpath expression config.toml: ObjectPath () expression that will be used to evaluate whether user is allowed to login into Driverless. Any expression that evaluates to True means user is allowed to log in. 
Examples: Simple claim equality: `$.our_claim is \"our_value\"` List of claims contains required value: `\"expected_role\" in @.roles` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_use_objectpath_expression", + "output": "auth openid use objectpath expression config.toml: ObjectPath () expression that will be used to evaluate whether user is allowed to login into Driverless. Any expression that evaluates to True means user is allowed to log in. Examples: Simple claim equality: `$.our_claim is \"our_value\"` List of claims contains required value: `\"expected_role\" in @.roles` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_use_objectpath_expression", + "output": "auth openid use objectpath expression config.toml: ObjectPath () expression that will be used to evaluate whether user is allowed to login into Driverless. Any expression that evaluates to True means user is allowed to log in. Examples: Simple claim equality: `$.our_claim is \"our_value\"` List of claims contains required value: `\"expected_role\" in @.roles` " + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_token_introspection_url do? : auth openid token introspection url config.toml: Sets token introspection URL for OpenID Connect authentication. (needs to be an absolute URL) Needs to be set when API token introspection is enabled. Is used to get the token TTL when set and IDP does not provide expires_in field in the token endpoint response." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_token_introspection_url. : auth openid token introspection url config.toml: Sets token introspection URL for OpenID Connect authentication. (needs to be an absolute URL) Needs to be set when API token introspection is enabled. 
Is used to get the token TTL when set and IDP does not provide expires_in field in the token endpoint response." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_token_introspection_url", + "output": "auth openid token introspection url config.toml: Sets token introspection URL for OpenID Connect authentication. (needs to be an absolute URL) Needs to be set when API token introspection is enabled. Is used to get the token TTL when set and IDP does not provide expires_in field in the token endpoint response." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_token_introspection_url", + "output": "auth openid token introspection url config.toml: Sets token introspection URL for OpenID Connect authentication. (needs to be an absolute URL) Needs to be set when API token introspection is enabled. Is used to get the token TTL when set and IDP does not provide expires_in field in the token endpoint response." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid token introspection url", + "output": "auth openid token introspection url config.toml: Sets token introspection URL for OpenID Connect authentication. (needs to be an absolute URL) Needs to be set when API token introspection is enabled. Is used to get the token TTL when set and IDP does not provide expires_in field in the token endpoint response." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid token introspection url config.toml: Sets token introspection URL for OpenID Connect authentication. (needs to be an absolute URL) Needs to be set when API token introspection is enabled. 
Is used to get the token TTL when set and IDP does not provide expires_in field in the token endpoint response." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_token_introspection_url", + "output": "auth openid token introspection url config.toml: Sets token introspection URL for OpenID Connect authentication. (needs to be an absolute URL) Needs to be set when API token introspection is enabled. Is used to get the token TTL when set and IDP does not provide expires_in field in the token endpoint response." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_token_introspection_url", + "output": "auth openid token introspection url config.toml: Sets token introspection URL for OpenID Connect authentication. (needs to be an absolute URL) Needs to be set when API token introspection is enabled. Is used to get the token TTL when set and IDP does not provide expires_in field in the token endpoint response." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_end_session_endpoint_url do? : auth openid end session endpoint url config.toml: Sets an URL where the user is being redirected after being logged out when set. (needs to be an absolute URL)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_end_session_endpoint_url. : auth openid end session endpoint url config.toml: Sets an URL where the user is being redirected after being logged out when set. (needs to be an absolute URL)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_end_session_endpoint_url", + "output": "auth openid end session endpoint url config.toml: Sets an URL where the user is being redirected after being logged out when set. 
(needs to be an absolute URL)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_end_session_endpoint_url", + "output": "auth openid end session endpoint url config.toml: Sets an URL where the user is being redirected after being logged out when set. (needs to be an absolute URL)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid end session endpoint url", + "output": "auth openid end session endpoint url config.toml: Sets an URL where the user is being redirected after being logged out when set. (needs to be an absolute URL)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid end session endpoint url config.toml: Sets an URL where the user is being redirected after being logged out when set. (needs to be an absolute URL)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_end_session_endpoint_url", + "output": "auth openid end session endpoint url config.toml: Sets an URL where the user is being redirected after being logged out when set. (needs to be an absolute URL)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_end_session_endpoint_url", + "output": "auth openid end session endpoint url config.toml: Sets an URL where the user is being redirected after being logged out when set. (needs to be an absolute URL)" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_openid_default_scopes do? : auth openid default scopes config.toml: If set, server will use these scopes when it asks for the token on the login. (space separated list)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_openid_default_scopes. 
: auth openid default scopes config.toml: If set, server will use these scopes when it asks for the token on the login. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_default_scopes", + "output": "auth openid default scopes config.toml: If set, server will use these scopes when it asks for the token on the login. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_openid_default_scopes", + "output": "auth openid default scopes config.toml: If set, server will use these scopes when it asks for the token on the login. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth openid default scopes", + "output": "auth openid default scopes config.toml: If set, server will use these scopes when it asks for the token on the login. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth openid default scopes config.toml: If set, server will use these scopes when it asks for the token on the login. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_openid_default_scopes", + "output": "auth openid default scopes config.toml: If set, server will use these scopes when it asks for the token on the login. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_openid_default_scopes", + "output": "auth openid default scopes config.toml: If set, server will use these scopes when it asks for the token on the login. 
(space separated list)" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_oidc_identity_source do? : auth oidc identity source config.toml: Specifies the source from which user identity and username is retrieved. Currently supported sources are: user_info: Retrieves username from UserInfo endpoint response id_token: Retrieves username from ID Token using `auth_openid_id_token_username_key` claim " + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_oidc_identity_source. : auth oidc identity source config.toml: Specifies the source from which user identity and username is retrieved. Currently supported sources are: user_info: Retrieves username from UserInfo endpoint response id_token: Retrieves username from ID Token using `auth_openid_id_token_username_key` claim " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_identity_source", + "output": "auth oidc identity source config.toml: Specifies the source from which user identity and username is retrieved. Currently supported sources are: user_info: Retrieves username from UserInfo endpoint response id_token: Retrieves username from ID Token using `auth_openid_id_token_username_key` claim " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_identity_source", + "output": "auth oidc identity source config.toml: Specifies the source from which user identity and username is retrieved. 
Currently supported sources are: user_info: Retrieves username from UserInfo endpoint response id_token: Retrieves username from ID Token using `auth_openid_id_token_username_key` claim " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth oidc identity source", + "output": "auth oidc identity source config.toml: Specifies the source from which user identity and username is retrieved. Currently supported sources are: user_info: Retrieves username from UserInfo endpoint response id_token: Retrieves username from ID Token using `auth_openid_id_token_username_key` claim " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth oidc identity source config.toml: Specifies the source from which user identity and username is retrieved. Currently supported sources are: user_info: Retrieves username from UserInfo endpoint response id_token: Retrieves username from ID Token using `auth_openid_id_token_username_key` claim " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_oidc_identity_source", + "output": "auth oidc identity source config.toml: Specifies the source from which user identity and username is retrieved. Currently supported sources are: user_info: Retrieves username from UserInfo endpoint response id_token: Retrieves username from ID Token using `auth_openid_id_token_username_key` claim " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_oidc_identity_source", + "output": "auth oidc identity source config.toml: Specifies the source from which user identity and username is retrieved. 
Currently supported sources are: user_info: Retrieves username from UserInfo endpoint response id_token: Retrieves username from ID Token using `auth_openid_id_token_username_key` claim " + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_oidc_username_claim do? : auth oidc username claim config.toml: Claim of preferred username in a message holding the user identity, which will be used as a username in application. The user identity source is specified by `auth_oidc_identity_source`, and can be e.g. UserInfo endpoint response or ID Token" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_oidc_username_claim. : auth oidc username claim config.toml: Claim of preferred username in a message holding the user identity, which will be used as a username in application. The user identity source is specified by `auth_oidc_identity_source`, and can be e.g. UserInfo endpoint response or ID Token" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_username_claim", + "output": "auth oidc username claim config.toml: Claim of preferred username in a message holding the user identity, which will be used as a username in application. The user identity source is specified by `auth_oidc_identity_source`, and can be e.g. UserInfo endpoint response or ID Token" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_username_claim", + "output": "auth oidc username claim config.toml: Claim of preferred username in a message holding the user identity, which will be used as a username in application. The user identity source is specified by `auth_oidc_identity_source`, and can be e.g. 
UserInfo endpoint response or ID Token" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth oidc username claim", + "output": "auth oidc username claim config.toml: Claim of preferred username in a message holding the user identity, which will be used as a username in application. The user identity source is specified by `auth_oidc_identity_source`, and can be e.g. UserInfo endpoint response or ID Token" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth oidc username claim config.toml: Claim of preferred username in a message holding the user identity, which will be used as a username in application. The user identity source is specified by `auth_oidc_identity_source`, and can be e.g. UserInfo endpoint response or ID Token" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_oidc_username_claim", + "output": "auth oidc username claim config.toml: Claim of preferred username in a message holding the user identity, which will be used as a username in application. The user identity source is specified by `auth_oidc_identity_source`, and can be e.g. UserInfo endpoint response or ID Token" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_oidc_username_claim", + "output": "auth oidc username claim config.toml: Claim of preferred username in a message holding the user identity, which will be used as a username in application. The user identity source is specified by `auth_oidc_identity_source`, and can be e.g. UserInfo endpoint response or ID Token" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_oidc_issuer_url do? : auth oidc issuer url config.toml: OpenID-Connect Issuer URL, which is used for automatic provider infodiscovery. E.g. 
https://login.microsoftonline.com//v2.0" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_oidc_issuer_url. : auth oidc issuer url config.toml: OpenID-Connect Issuer URL, which is used for automatic provider infodiscovery. E.g. https://login.microsoftonline.com//v2.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_issuer_url", + "output": "auth oidc issuer url config.toml: OpenID-Connect Issuer URL, which is used for automatic provider infodiscovery. E.g. https://login.microsoftonline.com//v2.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_issuer_url", + "output": "auth oidc issuer url config.toml: OpenID-Connect Issuer URL, which is used for automatic provider infodiscovery. E.g. https://login.microsoftonline.com//v2.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth oidc issuer url", + "output": "auth oidc issuer url config.toml: OpenID-Connect Issuer URL, which is used for automatic provider infodiscovery. E.g. https://login.microsoftonline.com//v2.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth oidc issuer url config.toml: OpenID-Connect Issuer URL, which is used for automatic provider infodiscovery. E.g. https://login.microsoftonline.com//v2.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_oidc_issuer_url", + "output": "auth oidc issuer url config.toml: OpenID-Connect Issuer URL, which is used for automatic provider infodiscovery. E.g. 
https://login.microsoftonline.com//v2.0" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_oidc_issuer_url", + "output": "auth oidc issuer url config.toml: OpenID-Connect Issuer URL, which is used for automatic provider infodiscovery. E.g. https://login.microsoftonline.com//v2.0" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_oidc_token_endpoint_url do? : auth oidc token endpoint url config.toml: OpenID-Connect Token endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_oidc_token_endpoint_url. : auth oidc token endpoint url config.toml: OpenID-Connect Token endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_token_endpoint_url", + "output": "auth oidc token endpoint url config.toml: OpenID-Connect Token endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_token_endpoint_url", + "output": "auth oidc token endpoint url config.toml: OpenID-Connect Token endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth oidc token endpoint url", + "output": "auth oidc token endpoint url config.toml: OpenID-Connect Token endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth oidc token endpoint url config.toml: OpenID-Connect Token endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_oidc_token_endpoint_url", + "output": "auth oidc token endpoint url config.toml: OpenID-Connect Token endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_oidc_token_endpoint_url", + "output": "auth oidc token endpoint url config.toml: OpenID-Connect Token endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_oidc_introspection_endpoint_url do? : auth oidc introspection endpoint url config.toml: OpenID-Connect Token introspection endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_oidc_introspection_endpoint_url. : auth oidc introspection endpoint url config.toml: OpenID-Connect Token introspection endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_introspection_endpoint_url", + "output": "auth oidc introspection endpoint url config.toml: OpenID-Connect Token introspection endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_introspection_endpoint_url", + "output": "auth oidc introspection endpoint url config.toml: OpenID-Connect Token introspection endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth oidc introspection endpoint url", + "output": "auth oidc introspection endpoint url config.toml: OpenID-Connect Token introspection endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth oidc introspection endpoint url config.toml: OpenID-Connect Token introspection endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_oidc_introspection_endpoint_url", + "output": "auth oidc introspection endpoint url config.toml: OpenID-Connect Token introspection endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_oidc_introspection_endpoint_url", + "output": "auth oidc introspection endpoint url config.toml: OpenID-Connect Token introspection endpoint URL. Setting this is optional and if it's empty, it'll be automatically set by provider info discovery." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_oidc_post_logout_url do? 
: auth oidc post logout url config.toml: Absolute URL to which user is redirected, after they log out from the application, in case OIDC authentication is used. Usually this is absolute URL of DriverlessAI Login page e.g. https://1.2.3.4:12345/login" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_oidc_post_logout_url. : auth oidc post logout url config.toml: Absolute URL to which user is redirected, after they log out from the application, in case OIDC authentication is used. Usually this is absolute URL of DriverlessAI Login page e.g. https://1.2.3.4:12345/login" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_post_logout_url", + "output": "auth oidc post logout url config.toml: Absolute URL to which user is redirected, after they log out from the application, in case OIDC authentication is used. Usually this is absolute URL of DriverlessAI Login page e.g. https://1.2.3.4:12345/login" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_post_logout_url", + "output": "auth oidc post logout url config.toml: Absolute URL to which user is redirected, after they log out from the application, in case OIDC authentication is used. Usually this is absolute URL of DriverlessAI Login page e.g. https://1.2.3.4:12345/login" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth oidc post logout url", + "output": "auth oidc post logout url config.toml: Absolute URL to which user is redirected, after they log out from the application, in case OIDC authentication is used. Usually this is absolute URL of DriverlessAI Login page e.g. 
https://1.2.3.4:12345/login" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth oidc post logout url config.toml: Absolute URL to which user is redirected, after they log out from the application, in case OIDC authentication is used. Usually this is absolute URL of DriverlessAI Login page e.g. https://1.2.3.4:12345/login" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_oidc_post_logout_url", + "output": "auth oidc post logout url config.toml: Absolute URL to which user is redirected, after they log out from the application, in case OIDC authentication is used. Usually this is absolute URL of DriverlessAI Login page e.g. https://1.2.3.4:12345/login" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_oidc_post_logout_url", + "output": "auth oidc post logout url config.toml: Absolute URL to which user is redirected, after they log out from the application, in case OIDC authentication is used. Usually this is absolute URL of DriverlessAI Login page e.g. https://1.2.3.4:12345/login" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_oidc_authorization_query_params do? : auth oidc authorization query params config.toml: Key-value mapping of extra HTTP query parameters in an OIDC authorization request." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_oidc_authorization_query_params. : auth oidc authorization query params config.toml: Key-value mapping of extra HTTP query parameters in an OIDC authorization request." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_authorization_query_params", + "output": "auth oidc authorization query params config.toml: Key-value mapping of extra HTTP query parameters in an OIDC authorization request." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_authorization_query_params", + "output": "auth oidc authorization query params config.toml: Key-value mapping of extra HTTP query parameters in an OIDC authorization request." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth oidc authorization query params", + "output": "auth oidc authorization query params config.toml: Key-value mapping of extra HTTP query parameters in an OIDC authorization request." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth oidc authorization query params config.toml: Key-value mapping of extra HTTP query parameters in an OIDC authorization request." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_oidc_authorization_query_params", + "output": "auth oidc authorization query params config.toml: Key-value mapping of extra HTTP query parameters in an OIDC authorization request." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_oidc_authorization_query_params", + "output": "auth oidc authorization query params config.toml: Key-value mapping of extra HTTP query parameters in an OIDC authorization request." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_oidc_skip_cert_verification do? : auth oidc skip cert verification config.toml: When set to True, will skip cert verification." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_oidc_skip_cert_verification. : auth oidc skip cert verification config.toml: When set to True, will skip cert verification." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_skip_cert_verification", + "output": "auth oidc skip cert verification config.toml: When set to True, will skip cert verification." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_skip_cert_verification", + "output": "auth oidc skip cert verification config.toml: When set to True, will skip cert verification." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth oidc skip cert verification", + "output": "auth oidc skip cert verification config.toml: When set to True, will skip cert verification." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth oidc skip cert verification config.toml: When set to True, will skip cert verification." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_oidc_skip_cert_verification", + "output": "auth oidc skip cert verification config.toml: When set to True, will skip cert verification." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_oidc_skip_cert_verification", + "output": "auth oidc skip cert verification config.toml: When set to True, will skip cert verification." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_oidc_ca_cert_location do? : auth oidc ca cert location config.toml: When set will use this value as the location for the CA cert, this takes precedence over auth_oidc_skip_cert_verification." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_oidc_ca_cert_location. : auth oidc ca cert location config.toml: When set will use this value as the location for the CA cert, this takes precedence over auth_oidc_skip_cert_verification." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_ca_cert_location", + "output": "auth oidc ca cert location config.toml: When set will use this value as the location for the CA cert, this takes precedence over auth_oidc_skip_cert_verification." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_oidc_ca_cert_location", + "output": "auth oidc ca cert location config.toml: When set will use this value as the location for the CA cert, this takes precedence over auth_oidc_skip_cert_verification." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth oidc ca cert location", + "output": "auth oidc ca cert location config.toml: When set will use this value as the location for the CA cert, this takes precedence over auth_oidc_skip_cert_verification." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth oidc ca cert location config.toml: When set will use this value as the location for the CA cert, this takes precedence over auth_oidc_skip_cert_verification." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_oidc_ca_cert_location", + "output": "auth oidc ca cert location config.toml: When set will use this value as the location for the CA cert, this takes precedence over auth_oidc_skip_cert_verification." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_oidc_ca_cert_location", + "output": "auth oidc ca cert location config.toml: When set will use this value as the location for the CA cert, this takes precedence over auth_oidc_skip_cert_verification." + }, + { + "prompt_type": "plain", + "instruction": ": What does api_token_introspection_enabled do? : api token introspection enabled config.toml: Enables option to use Bearer token for authentication with the RPC endpoint." + }, + { + "prompt_type": "plain", + "instruction": ": Explain api_token_introspection_enabled. : api token introspection enabled config.toml: Enables option to use Bearer token for authentication with the RPC endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "api_token_introspection_enabled", + "output": "api token introspection enabled config.toml: Enables option to use Bearer token for authentication with the RPC endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "api_token_introspection_enabled", + "output": "api token introspection enabled config.toml: Enables option to use Bearer token for authentication with the RPC endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "api token introspection enabled", + "output": "api token introspection enabled config.toml: Enables option to use Bearer token for authentication with the RPC endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "api token introspection enabled config.toml: Enables option to use Bearer token for authentication with the RPC endpoint." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting api_token_introspection_enabled", + "output": "api token introspection enabled config.toml: Enables option to use Bearer token for authentication with the RPC endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting api_token_introspection_enabled", + "output": "api token introspection enabled config.toml: Enables option to use Bearer token for authentication with the RPC endpoint." + }, + { + "prompt_type": "plain", + "instruction": ": What does api_token_introspection_method do? : api token introspection method config.toml: Sets the method that is used to introspect the bearer token. OAUTH2_TOKEN_INTROSPECTION: Uses OAuth 2.0 Token Introspection (RPC 7662) endpoint to introspect the bearer token. This useful when 'openid' is used as the authentication method. Uses 'auth_openid_client_id' and 'auth_openid_client_secret' and to authenticate with the authorization server and `auth_openid_token_introspection_url` to perform the introspection. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain api_token_introspection_method. : api token introspection method config.toml: Sets the method that is used to introspect the bearer token. OAUTH2_TOKEN_INTROSPECTION: Uses OAuth 2.0 Token Introspection (RPC 7662) endpoint to introspect the bearer token. This useful when 'openid' is used as the authentication method. Uses 'auth_openid_client_id' and 'auth_openid_client_secret' and to authenticate with the authorization server and `auth_openid_token_introspection_url` to perform the introspection. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "api_token_introspection_method", + "output": "api token introspection method config.toml: Sets the method that is used to introspect the bearer token. 
OAUTH2_TOKEN_INTROSPECTION: Uses OAuth 2.0 Token Introspection (RPC 7662) endpoint to introspect the bearer token. This useful when 'openid' is used as the authentication method. Uses 'auth_openid_client_id' and 'auth_openid_client_secret' and to authenticate with the authorization server and `auth_openid_token_introspection_url` to perform the introspection. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "api_token_introspection_method", + "output": "api token introspection method config.toml: Sets the method that is used to introspect the bearer token. OAUTH2_TOKEN_INTROSPECTION: Uses OAuth 2.0 Token Introspection (RPC 7662) endpoint to introspect the bearer token. This useful when 'openid' is used as the authentication method. Uses 'auth_openid_client_id' and 'auth_openid_client_secret' and to authenticate with the authorization server and `auth_openid_token_introspection_url` to perform the introspection. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "api token introspection method", + "output": "api token introspection method config.toml: Sets the method that is used to introspect the bearer token. OAUTH2_TOKEN_INTROSPECTION: Uses OAuth 2.0 Token Introspection (RPC 7662) endpoint to introspect the bearer token. This useful when 'openid' is used as the authentication method. Uses 'auth_openid_client_id' and 'auth_openid_client_secret' and to authenticate with the authorization server and `auth_openid_token_introspection_url` to perform the introspection. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "api token introspection method config.toml: Sets the method that is used to introspect the bearer token. 
OAUTH2_TOKEN_INTROSPECTION: Uses OAuth 2.0 Token Introspection (RPC 7662) endpoint to introspect the bearer token. This useful when 'openid' is used as the authentication method. Uses 'auth_openid_client_id' and 'auth_openid_client_secret' and to authenticate with the authorization server and `auth_openid_token_introspection_url` to perform the introspection. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting api_token_introspection_method", + "output": "api token introspection method config.toml: Sets the method that is used to introspect the bearer token. OAUTH2_TOKEN_INTROSPECTION: Uses OAuth 2.0 Token Introspection (RPC 7662) endpoint to introspect the bearer token. This useful when 'openid' is used as the authentication method. Uses 'auth_openid_client_id' and 'auth_openid_client_secret' and to authenticate with the authorization server and `auth_openid_token_introspection_url` to perform the introspection. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting api_token_introspection_method", + "output": "api token introspection method config.toml: Sets the method that is used to introspect the bearer token. OAUTH2_TOKEN_INTROSPECTION: Uses OAuth 2.0 Token Introspection (RPC 7662) endpoint to introspect the bearer token. This useful when 'openid' is used as the authentication method. Uses 'auth_openid_client_id' and 'auth_openid_client_secret' and to authenticate with the authorization server and `auth_openid_token_introspection_url` to perform the introspection. " + }, + { + "prompt_type": "plain", + "instruction": ": What does api_token_oauth2_scopes do? : api token oauth2 scopes config.toml: Sets the minimum of the scopes that the access token needs to have in order to pass the introspection. Space separated./ This is passed to the introspection endpoint and also verified after response for the servers that don't enforce scopes. 
Keeping this empty turns any the verification off. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain api_token_oauth2_scopes. : api token oauth2 scopes config.toml: Sets the minimum of the scopes that the access token needs to have in order to pass the introspection. Space separated./ This is passed to the introspection endpoint and also verified after response for the servers that don't enforce scopes. Keeping this empty turns any the verification off. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "api_token_oauth2_scopes", + "output": "api token oauth2 scopes config.toml: Sets the minimum of the scopes that the access token needs to have in order to pass the introspection. Space separated./ This is passed to the introspection endpoint and also verified after response for the servers that don't enforce scopes. Keeping this empty turns any the verification off. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "api_token_oauth2_scopes", + "output": "api token oauth2 scopes config.toml: Sets the minimum of the scopes that the access token needs to have in order to pass the introspection. Space separated./ This is passed to the introspection endpoint and also verified after response for the servers that don't enforce scopes. Keeping this empty turns any the verification off. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "api token oauth2 scopes", + "output": "api token oauth2 scopes config.toml: Sets the minimum of the scopes that the access token needs to have in order to pass the introspection. Space separated./ This is passed to the introspection endpoint and also verified after response for the servers that don't enforce scopes. Keeping this empty turns any the verification off. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "api token oauth2 scopes config.toml: Sets the minimum of the scopes that the access token needs to have in order to pass the introspection. Space separated./ This is passed to the introspection endpoint and also verified after response for the servers that don't enforce scopes. Keeping this empty turns any the verification off. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting api_token_oauth2_scopes", + "output": "api token oauth2 scopes config.toml: Sets the minimum of the scopes that the access token needs to have in order to pass the introspection. Space separated./ This is passed to the introspection endpoint and also verified after response for the servers that don't enforce scopes. Keeping this empty turns any the verification off. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting api_token_oauth2_scopes", + "output": "api token oauth2 scopes config.toml: Sets the minimum of the scopes that the access token needs to have in order to pass the introspection. Space separated./ This is passed to the introspection endpoint and also verified after response for the servers that don't enforce scopes. Keeping this empty turns any the verification off. " + }, + { + "prompt_type": "plain", + "instruction": ": What does api_token_oauth2_username_field_name do? : api token oauth2 username field name config.toml: Which field of the response returned by the token introspection endpoint should be used as a username." + }, + { + "prompt_type": "plain", + "instruction": ": Explain api_token_oauth2_username_field_name. : api token oauth2 username field name config.toml: Which field of the response returned by the token introspection endpoint should be used as a username." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "api_token_oauth2_username_field_name", + "output": "api token oauth2 username field name config.toml: Which field of the response returned by the token introspection endpoint should be used as a username." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "api_token_oauth2_username_field_name", + "output": "api token oauth2 username field name config.toml: Which field of the response returned by the token introspection endpoint should be used as a username." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "api token oauth2 username field name", + "output": "api token oauth2 username field name config.toml: Which field of the response returned by the token introspection endpoint should be used as a username." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "api token oauth2 username field name config.toml: Which field of the response returned by the token introspection endpoint should be used as a username." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting api_token_oauth2_username_field_name", + "output": "api token oauth2 username field name config.toml: Which field of the response returned by the token introspection endpoint should be used as a username." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting api_token_oauth2_username_field_name", + "output": "api token oauth2 username field name config.toml: Which field of the response returned by the token introspection endpoint should be used as a username." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does oauth2_client_tokens_enabled do? : oauth2 client tokens enabled config.toml: Enables the option to initiate a PKCE flow from the UI in order to obtaintokens usable with Driverless clients" + }, + { + "prompt_type": "plain", + "instruction": ": Explain oauth2_client_tokens_enabled. : oauth2 client tokens enabled config.toml: Enables the option to initiate a PKCE flow from the UI in order to obtaintokens usable with Driverless clients" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2_client_tokens_enabled", + "output": "oauth2 client tokens enabled config.toml: Enables the option to initiate a PKCE flow from the UI in order to obtaintokens usable with Driverless clients" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2_client_tokens_enabled", + "output": "oauth2 client tokens enabled config.toml: Enables the option to initiate a PKCE flow from the UI in order to obtaintokens usable with Driverless clients" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2 client tokens enabled", + "output": "oauth2 client tokens enabled config.toml: Enables the option to initiate a PKCE flow from the UI in order to obtaintokens usable with Driverless clients" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "oauth2 client tokens enabled config.toml: Enables the option to initiate a PKCE flow from the UI in order to obtaintokens usable with Driverless clients" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting oauth2_client_tokens_enabled", + "output": "oauth2 client tokens enabled config.toml: Enables the option to initiate a 
PKCE flow from the UI in order to obtaintokens usable with Driverless clients" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting oauth2_client_tokens_enabled", + "output": "oauth2 client tokens enabled config.toml: Enables the option to initiate a PKCE flow from the UI in order to obtaintokens usable with Driverless clients" + }, + { + "prompt_type": "plain", + "instruction": ": What does oauth2_client_tokens_client_id do? : oauth2 client tokens client id config.toml: Sets up client id that will be used in the OAuth 2.0 Authorization Code Flow to obtain the tokens. Client needs to be public and be able to use PKCE with S256 code challenge." + }, + { + "prompt_type": "plain", + "instruction": ": Explain oauth2_client_tokens_client_id. : oauth2 client tokens client id config.toml: Sets up client id that will be used in the OAuth 2.0 Authorization Code Flow to obtain the tokens. Client needs to be public and be able to use PKCE with S256 code challenge." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2_client_tokens_client_id", + "output": "oauth2 client tokens client id config.toml: Sets up client id that will be used in the OAuth 2.0 Authorization Code Flow to obtain the tokens. Client needs to be public and be able to use PKCE with S256 code challenge." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2_client_tokens_client_id", + "output": "oauth2 client tokens client id config.toml: Sets up client id that will be used in the OAuth 2.0 Authorization Code Flow to obtain the tokens. Client needs to be public and be able to use PKCE with S256 code challenge." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2 client tokens client id", + "output": "oauth2 client tokens client id config.toml: Sets up client id that will be used in the OAuth 2.0 Authorization Code Flow to obtain the tokens. Client needs to be public and be able to use PKCE with S256 code challenge." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "oauth2 client tokens client id config.toml: Sets up client id that will be used in the OAuth 2.0 Authorization Code Flow to obtain the tokens. Client needs to be public and be able to use PKCE with S256 code challenge." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting oauth2_client_tokens_client_id", + "output": "oauth2 client tokens client id config.toml: Sets up client id that will be used in the OAuth 2.0 Authorization Code Flow to obtain the tokens. Client needs to be public and be able to use PKCE with S256 code challenge." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting oauth2_client_tokens_client_id", + "output": "oauth2 client tokens client id config.toml: Sets up client id that will be used in the OAuth 2.0 Authorization Code Flow to obtain the tokens. Client needs to be public and be able to use PKCE with S256 code challenge." + }, + { + "prompt_type": "plain", + "instruction": ": What does oauth2_client_tokens_authorize_url do? : oauth2 client tokens authorize url config.toml: Sets up the absolute url to the authorize endpoint." + }, + { + "prompt_type": "plain", + "instruction": ": Explain oauth2_client_tokens_authorize_url. : oauth2 client tokens authorize url config.toml: Sets up the absolute url to the authorize endpoint." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2_client_tokens_authorize_url", + "output": "oauth2 client tokens authorize url config.toml: Sets up the absolute url to the authorize endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2_client_tokens_authorize_url", + "output": "oauth2 client tokens authorize url config.toml: Sets up the absolute url to the authorize endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2 client tokens authorize url", + "output": "oauth2 client tokens authorize url config.toml: Sets up the absolute url to the authorize endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "oauth2 client tokens authorize url config.toml: Sets up the absolute url to the authorize endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting oauth2_client_tokens_authorize_url", + "output": "oauth2 client tokens authorize url config.toml: Sets up the absolute url to the authorize endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting oauth2_client_tokens_authorize_url", + "output": "oauth2 client tokens authorize url config.toml: Sets up the absolute url to the authorize endpoint." + }, + { + "prompt_type": "plain", + "instruction": ": What does oauth2_client_tokens_token_url do? : oauth2 client tokens token url config.toml: Sets up the absolute url to the token endpoint." + }, + { + "prompt_type": "plain", + "instruction": ": Explain oauth2_client_tokens_token_url. : oauth2 client tokens token url config.toml: Sets up the absolute url to the token endpoint." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2_client_tokens_token_url", + "output": "oauth2 client tokens token url config.toml: Sets up the absolute url to the token endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2_client_tokens_token_url", + "output": "oauth2 client tokens token url config.toml: Sets up the absolute url to the token endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2 client tokens token url", + "output": "oauth2 client tokens token url config.toml: Sets up the absolute url to the token endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "oauth2 client tokens token url config.toml: Sets up the absolute url to the token endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting oauth2_client_tokens_token_url", + "output": "oauth2 client tokens token url config.toml: Sets up the absolute url to the token endpoint." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting oauth2_client_tokens_token_url", + "output": "oauth2 client tokens token url config.toml: Sets up the absolute url to the token endpoint." + }, + { + "prompt_type": "plain", + "instruction": ": What does oauth2_client_tokens_introspection_url do? : oauth2 client tokens introspection url config.toml: Sets up the absolute url to the token introspection endpoint.It's displayed in the UI so that clients can inspect the token expiration." + }, + { + "prompt_type": "plain", + "instruction": ": Explain oauth2_client_tokens_introspection_url. 
: oauth2 client tokens introspection url config.toml: Sets up the absolute url to the token introspection endpoint.It's displayed in the UI so that clients can inspect the token expiration." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2_client_tokens_introspection_url", + "output": "oauth2 client tokens introspection url config.toml: Sets up the absolute url to the token introspection endpoint.It's displayed in the UI so that clients can inspect the token expiration." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2_client_tokens_introspection_url", + "output": "oauth2 client tokens introspection url config.toml: Sets up the absolute url to the token introspection endpoint.It's displayed in the UI so that clients can inspect the token expiration." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2 client tokens introspection url", + "output": "oauth2 client tokens introspection url config.toml: Sets up the absolute url to the token introspection endpoint.It's displayed in the UI so that clients can inspect the token expiration." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "oauth2 client tokens introspection url config.toml: Sets up the absolute url to the token introspection endpoint.It's displayed in the UI so that clients can inspect the token expiration." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting oauth2_client_tokens_introspection_url", + "output": "oauth2 client tokens introspection url config.toml: Sets up the absolute url to the token introspection endpoint.It's displayed in the UI so that clients can inspect the token expiration." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting oauth2_client_tokens_introspection_url", + "output": "oauth2 client tokens introspection url config.toml: Sets up the absolute url to the token introspection endpoint.It's displayed in the UI so that clients can inspect the token expiration." + }, + { + "prompt_type": "plain", + "instruction": ": What does oauth2_client_tokens_redirect_url do? : oauth2 client tokens redirect url config.toml: Sets up the absolute to the redirect url where Driverless handles the redirect part of the Authorization Code Flow. this /oauth2/client_token" + }, + { + "prompt_type": "plain", + "instruction": ": Explain oauth2_client_tokens_redirect_url. : oauth2 client tokens redirect url config.toml: Sets up the absolute to the redirect url where Driverless handles the redirect part of the Authorization Code Flow. this /oauth2/client_token" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2_client_tokens_redirect_url", + "output": "oauth2 client tokens redirect url config.toml: Sets up the absolute to the redirect url where Driverless handles the redirect part of the Authorization Code Flow. this /oauth2/client_token" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2_client_tokens_redirect_url", + "output": "oauth2 client tokens redirect url config.toml: Sets up the absolute to the redirect url where Driverless handles the redirect part of the Authorization Code Flow. 
this /oauth2/client_token" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2 client tokens redirect url", + "output": "oauth2 client tokens redirect url config.toml: Sets up the absolute to the redirect url where Driverless handles the redirect part of the Authorization Code Flow. this /oauth2/client_token" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "oauth2 client tokens redirect url config.toml: Sets up the absolute to the redirect url where Driverless handles the redirect part of the Authorization Code Flow. this /oauth2/client_token" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting oauth2_client_tokens_redirect_url", + "output": "oauth2 client tokens redirect url config.toml: Sets up the absolute to the redirect url where Driverless handles the redirect part of the Authorization Code Flow. this /oauth2/client_token" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting oauth2_client_tokens_redirect_url", + "output": "oauth2 client tokens redirect url config.toml: Sets up the absolute to the redirect url where Driverless handles the redirect part of the Authorization Code Flow. this /oauth2/client_token" + }, + { + "prompt_type": "plain", + "instruction": ": What does oauth2_client_tokens_scope do? : oauth2 client tokens scope config.toml: Sets up the scope for the requested tokens. Space seprated list." + }, + { + "prompt_type": "plain", + "instruction": ": Explain oauth2_client_tokens_scope. : oauth2 client tokens scope config.toml: Sets up the scope for the requested tokens. Space seprated list." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2_client_tokens_scope", + "output": "oauth2 client tokens scope config.toml: Sets up the scope for the requested tokens. Space seprated list." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2_client_tokens_scope", + "output": "oauth2 client tokens scope config.toml: Sets up the scope for the requested tokens. Space seprated list." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "oauth2 client tokens scope", + "output": "oauth2 client tokens scope config.toml: Sets up the scope for the requested tokens. Space seprated list." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "oauth2 client tokens scope config.toml: Sets up the scope for the requested tokens. Space seprated list." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting oauth2_client_tokens_scope", + "output": "oauth2 client tokens scope config.toml: Sets up the scope for the requested tokens. Space seprated list." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting oauth2_client_tokens_scope", + "output": "oauth2 client tokens scope config.toml: Sets up the scope for the requested tokens. Space seprated list." + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_server do? : ldap server config.toml: ldap server domain or ip" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_server. 
: ldap server config.toml: ldap server domain or ip" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_server", + "output": "ldap server config.toml: ldap server domain or ip" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_server", + "output": "ldap server config.toml: ldap server domain or ip" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap server", + "output": "ldap server config.toml: ldap server domain or ip" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap server config.toml: ldap server domain or ip" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_server", + "output": "ldap server config.toml: ldap server domain or ip" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_server", + "output": "ldap server config.toml: ldap server domain or ip" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_port do? : ldap port config.toml: ldap server port" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_port. 
: ldap port config.toml: ldap server port" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_port", + "output": "ldap port config.toml: ldap server port" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_port", + "output": "ldap port config.toml: ldap server port" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap port", + "output": "ldap port config.toml: ldap server port" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap port config.toml: ldap server port" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_port", + "output": "ldap port config.toml: ldap server port" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_port", + "output": "ldap port config.toml: ldap server port" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_bind_dn do? : ldap bind dn config.toml: Complete DN of the LDAP bind user" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_bind_dn. 
: ldap bind dn config.toml: Complete DN of the LDAP bind user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_bind_dn", + "output": "ldap bind dn config.toml: Complete DN of the LDAP bind user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_bind_dn", + "output": "ldap bind dn config.toml: Complete DN of the LDAP bind user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap bind dn", + "output": "ldap bind dn config.toml: Complete DN of the LDAP bind user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap bind dn config.toml: Complete DN of the LDAP bind user" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_bind_dn", + "output": "ldap bind dn config.toml: Complete DN of the LDAP bind user" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_bind_dn", + "output": "ldap bind dn config.toml: Complete DN of the LDAP bind user" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_bind_password do? : ldap bind password config.toml: Password for the LDAP bind" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_bind_password. 
: ldap bind password config.toml: Password for the LDAP bind" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_bind_password", + "output": "ldap bind password config.toml: Password for the LDAP bind" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_bind_password", + "output": "ldap bind password config.toml: Password for the LDAP bind" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap bind password", + "output": "ldap bind password config.toml: Password for the LDAP bind" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap bind password config.toml: Password for the LDAP bind" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_bind_password", + "output": "ldap bind password config.toml: Password for the LDAP bind" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_bind_password", + "output": "ldap bind password config.toml: Password for the LDAP bind" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_tls_file do? : ldap tls file config.toml: Provide Cert file location" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_tls_file. 
: ldap tls file config.toml: Provide Cert file location" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_tls_file", + "output": "ldap tls file config.toml: Provide Cert file location" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_tls_file", + "output": "ldap tls file config.toml: Provide Cert file location" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap tls file", + "output": "ldap tls file config.toml: Provide Cert file location" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap tls file config.toml: Provide Cert file location" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_tls_file", + "output": "ldap tls file config.toml: Provide Cert file location" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_tls_file", + "output": "ldap tls file config.toml: Provide Cert file location" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_use_ssl do? : ldap use ssl config.toml: use true to use ssl or false" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_use_ssl. 
: ldap use ssl config.toml: use true to use ssl or false" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_use_ssl", + "output": "ldap use ssl config.toml: use true to use ssl or false" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_use_ssl", + "output": "ldap use ssl config.toml: use true to use ssl or false" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap use ssl", + "output": "ldap use ssl config.toml: use true to use ssl or false" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap use ssl config.toml: use true to use ssl or false" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_use_ssl", + "output": "ldap use ssl config.toml: use true to use ssl or false" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_use_ssl", + "output": "ldap use ssl config.toml: use true to use ssl or false" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_search_base do? : ldap search base config.toml: the location in the DIT where the search will start" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_search_base. 
: ldap search base config.toml: the location in the DIT where the search will start" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_search_base", + "output": "ldap search base config.toml: the location in the DIT where the search will start" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_search_base", + "output": "ldap search base config.toml: the location in the DIT where the search will start" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap search base", + "output": "ldap search base config.toml: the location in the DIT where the search will start" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap search base config.toml: the location in the DIT where the search will start" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_search_base", + "output": "ldap search base config.toml: the location in the DIT where the search will start" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_search_base", + "output": "ldap search base config.toml: the location in the DIT where the search will start" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_search_filter do? : ldap search filter config.toml: A string that describes what you are searching for. You can use Pythonsubstitution to have this constructed dynamically.(only {{DAI_USERNAME}} is supported)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_search_filter. : ldap search filter config.toml: A string that describes what you are searching for. 
You can use Pythonsubstitution to have this constructed dynamically.(only {{DAI_USERNAME}} is supported)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_search_filter", + "output": "ldap search filter config.toml: A string that describes what you are searching for. You can use Pythonsubstitution to have this constructed dynamically.(only {{DAI_USERNAME}} is supported)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_search_filter", + "output": "ldap search filter config.toml: A string that describes what you are searching for. You can use Pythonsubstitution to have this constructed dynamically.(only {{DAI_USERNAME}} is supported)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap search filter", + "output": "ldap search filter config.toml: A string that describes what you are searching for. You can use Pythonsubstitution to have this constructed dynamically.(only {{DAI_USERNAME}} is supported)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap search filter config.toml: A string that describes what you are searching for. You can use Pythonsubstitution to have this constructed dynamically.(only {{DAI_USERNAME}} is supported)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_search_filter", + "output": "ldap search filter config.toml: A string that describes what you are searching for. 
You can use Python substitution to have this constructed dynamically. (only {{DAI_USERNAME}} is supported)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_search_filter", + "output": "ldap search filter config.toml: A string that describes what you are searching for. You can use Python substitution to have this constructed dynamically. (only {{DAI_USERNAME}} is supported)" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_search_attributes do? : ldap search attributes config.toml: ldap attributes to return from search" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_search_attributes. : ldap search attributes config.toml: ldap attributes to return from search" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_search_attributes", + "output": "ldap search attributes config.toml: ldap attributes to return from search" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_search_attributes", + "output": "ldap search attributes config.toml: ldap attributes to return from search" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap search attributes", + "output": "ldap search attributes config.toml: ldap attributes to return from search" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap search attributes config.toml: ldap attributes to return from search" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_search_attributes", + "output": "ldap search attributes config.toml: ldap attributes to return from search" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a 
detailed explanation of the expert setting ldap_search_attributes", + "output": "ldap search attributes config.toml: ldap attributes to return from search" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_user_name_attribute do? : ldap user name attribute config.toml: specify key to find user name" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_user_name_attribute. : ldap user name attribute config.toml: specify key to find user name" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_user_name_attribute", + "output": "ldap user name attribute config.toml: specify key to find user name" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_user_name_attribute", + "output": "ldap user name attribute config.toml: specify key to find user name" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap user name attribute", + "output": "ldap user name attribute config.toml: specify key to find user name" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap user name attribute config.toml: specify key to find user name" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_user_name_attribute", + "output": "ldap user name attribute config.toml: specify key to find user name" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_user_name_attribute", + "output": "ldap user name attribute config.toml: specify key to find user name" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_recipe do? 
: ldap recipe config.toml: When using this recipe, needs to be set to \"1\"" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_recipe. : ldap recipe config.toml: When using this recipe, needs to be set to \"1\"" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_recipe", + "output": "ldap recipe config.toml: When using this recipe, needs to be set to \"1\"" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_recipe", + "output": "ldap recipe config.toml: When using this recipe, needs to be set to \"1\"" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap recipe", + "output": "ldap recipe config.toml: When using this recipe, needs to be set to \"1\"" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap recipe config.toml: When using this recipe, needs to be set to \"1\"" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_recipe", + "output": "ldap recipe config.toml: When using this recipe, needs to be set to \"1\"" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_recipe", + "output": "ldap recipe config.toml: When using this recipe, needs to be set to \"1\"" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_user_prefix do? : ldap user prefix config.toml: Deprecated do not use" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_user_prefix. 
: ldap user prefix config.toml: Deprecated do not use" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_user_prefix", + "output": "ldap user prefix config.toml: Deprecated do not use" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_user_prefix", + "output": "ldap user prefix config.toml: Deprecated do not use" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap user prefix", + "output": "ldap user prefix config.toml: Deprecated do not use" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap user prefix config.toml: Deprecated do not use" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_user_prefix", + "output": "ldap user prefix config.toml: Deprecated do not use" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_user_prefix", + "output": "ldap user prefix config.toml: Deprecated do not use" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_search_user_id do? : ldap search user id config.toml: Deprecated, Use ldap_bind_dn" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_search_user_id. 
: ldap search user id config.toml: Deprecated, Use ldap_bind_dn" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_search_user_id", + "output": "ldap search user id config.toml: Deprecated, Use ldap_bind_dn" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_search_user_id", + "output": "ldap search user id config.toml: Deprecated, Use ldap_bind_dn" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap search user id", + "output": "ldap search user id config.toml: Deprecated, Use ldap_bind_dn" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap search user id config.toml: Deprecated, Use ldap_bind_dn" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_search_user_id", + "output": "ldap search user id config.toml: Deprecated, Use ldap_bind_dn" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_search_user_id", + "output": "ldap search user id config.toml: Deprecated, Use ldap_bind_dn" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_search_password do? : ldap search password config.toml: Deprecated, ldap_bind_password" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_search_password. 
: ldap search password config.toml: Deprecated, ldap_bind_password" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_search_password", + "output": "ldap search password config.toml: Deprecated, ldap_bind_password" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_search_password", + "output": "ldap search password config.toml: Deprecated, ldap_bind_password" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap search password", + "output": "ldap search password config.toml: Deprecated, ldap_bind_password" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap search password config.toml: Deprecated, ldap_bind_password" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_search_password", + "output": "ldap search password config.toml: Deprecated, ldap_bind_password" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_search_password", + "output": "ldap search password config.toml: Deprecated, ldap_bind_password" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_ou_dn do? : ldap ou dn config.toml: Deprecated, use ldap_search_base instead" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_ou_dn. 
: ldap ou dn config.toml: Deprecated, use ldap_search_base instead" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_ou_dn", + "output": "ldap ou dn config.toml: Deprecated, use ldap_search_base instead" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_ou_dn", + "output": "ldap ou dn config.toml: Deprecated, use ldap_search_base instead" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap ou dn", + "output": "ldap ou dn config.toml: Deprecated, use ldap_search_base instead" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap ou dn config.toml: Deprecated, use ldap_search_base instead" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_ou_dn", + "output": "ldap ou dn config.toml: Deprecated, use ldap_search_base instead" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_ou_dn", + "output": "ldap ou dn config.toml: Deprecated, use ldap_search_base instead" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_dc do? : ldap dc config.toml: Deprecated, use ldap_base_dn" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_dc. 
: ldap dc config.toml: Deprecated, use ldap_base_dn" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_dc", + "output": "ldap dc config.toml: Deprecated, use ldap_base_dn" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_dc", + "output": "ldap dc config.toml: Deprecated, use ldap_base_dn" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap dc", + "output": "ldap dc config.toml: Deprecated, use ldap_base_dn" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap dc config.toml: Deprecated, use ldap_base_dn" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_dc", + "output": "ldap dc config.toml: Deprecated, use ldap_base_dn" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_dc", + "output": "ldap dc config.toml: Deprecated, use ldap_base_dn" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_base_dn do? : ldap base dn config.toml: Deprecated, use ldap_search_base" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_base_dn. 
: ldap base dn config.toml: Deprecated, use ldap_search_base" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_base_dn", + "output": "ldap base dn config.toml: Deprecated, use ldap_search_base" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_base_dn", + "output": "ldap base dn config.toml: Deprecated, use ldap_search_base" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap base dn", + "output": "ldap base dn config.toml: Deprecated, use ldap_search_base" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap base dn config.toml: Deprecated, use ldap_search_base" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_base_dn", + "output": "ldap base dn config.toml: Deprecated, use ldap_search_base" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_base_dn", + "output": "ldap base dn config.toml: Deprecated, use ldap_search_base" + }, + { + "prompt_type": "plain", + "instruction": ": What does ldap_base_filter do? : ldap base filter config.toml: Deprecated, use ldap_search_filter" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ldap_base_filter. 
: ldap base filter config.toml: Deprecated, use ldap_search_filter" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_base_filter", + "output": "ldap base filter config.toml: Deprecated, use ldap_search_filter" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap_base_filter", + "output": "ldap base filter config.toml: Deprecated, use ldap_search_filter" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ldap base filter", + "output": "ldap base filter config.toml: Deprecated, use ldap_search_filter" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ldap base filter config.toml: Deprecated, use ldap_search_filter" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ldap_base_filter", + "output": "ldap base filter config.toml: Deprecated, use ldap_search_filter" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ldap_base_filter", + "output": "ldap base filter config.toml: Deprecated, use ldap_search_filter" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_crl_file do? : auth tls crl file config.toml: Path to the CRL file that will be used to verify client certificate." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_crl_file. : auth tls crl file config.toml: Path to the CRL file that will be used to verify client certificate." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_crl_file", + "output": "auth tls crl file config.toml: Path to the CRL file that will be used to verify client certificate." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_crl_file", + "output": "auth tls crl file config.toml: Path to the CRL file that will be used to verify client certificate." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls crl file", + "output": "auth tls crl file config.toml: Path to the CRL file that will be used to verify client certificate." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls crl file config.toml: Path to the CRL file that will be used to verify client certificate." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_crl_file", + "output": "auth tls crl file config.toml: Path to the CRL file that will be used to verify client certificate." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_crl_file", + "output": "auth tls crl file config.toml: Path to the CRL file that will be used to verify client certificate." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_subject_field do? : auth tls subject field config.toml: What field of the subject would be used as source for username or other values used for further validation." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_subject_field. : auth tls subject field config.toml: What field of the subject would be used as source for username or other values used for further validation." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_subject_field", + "output": "auth tls subject field config.toml: What field of the subject would be used as source for username or other values used for further validation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_subject_field", + "output": "auth tls subject field config.toml: What field of the subject would be used as source for username or other values used for further validation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls subject field", + "output": "auth tls subject field config.toml: What field of the subject would be used as source for username or other values used for further validation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls subject field config.toml: What field of the subject would be used as source for username or other values used for further validation." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_subject_field", + "output": "auth tls subject field config.toml: What field of the subject would be used as source for username or other values used for further validation." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_subject_field", + "output": "auth tls subject field config.toml: What field of the subject would be used as source for username or other values used for further validation." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_field_parse_regexp do? 
: auth tls field parse regexp config.toml: Regular expression that will be used to parse subject field to obtain the username or other values used for further validation." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_field_parse_regexp. : auth tls field parse regexp config.toml: Regular expression that will be used to parse subject field to obtain the username or other values used for further validation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_field_parse_regexp", + "output": "auth tls field parse regexp config.toml: Regular expression that will be used to parse subject field to obtain the username or other values used for further validation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_field_parse_regexp", + "output": "auth tls field parse regexp config.toml: Regular expression that will be used to parse subject field to obtain the username or other values used for further validation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls field parse regexp", + "output": "auth tls field parse regexp config.toml: Regular expression that will be used to parse subject field to obtain the username or other values used for further validation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls field parse regexp config.toml: Regular expression that will be used to parse subject field to obtain the username or other values used for further validation." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_field_parse_regexp", + "output": "auth tls field parse regexp config.toml: Regular expression that will be used to parse subject field to obtain the username or other values used for further validation." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_field_parse_regexp", + "output": "auth tls field parse regexp config.toml: Regular expression that will be used to parse subject field to obtain the username or other values used for further validation." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_user_lookup do? : auth tls user lookup config.toml: Sets up the way how user identity would be obtained REGEXP_ONLY: Will use 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' to extract the username from the client certificate. LDAP_LOOKUP: Will use LDAP server to lookup for the username. 'auth_tls_ldap_server', 'auth_tls_ldap_port', 'auth_tls_ldap_use_ssl', 'auth_tls_ldap_tls_file', 'auth_tls_ldap_bind_dn', 'auth_tls_ldap_bind_password' options are used to establish the connection with the LDAP server. 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' options are used to parse the certificate. 'auth_tls_ldap_search_base', 'auth_tls_ldap_search_filter', and 'auth_tls_ldap_username_attribute' options are used to do the lookup. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_user_lookup. : auth tls user lookup config.toml: Sets up the way how user identity would be obtained REGEXP_ONLY: Will use 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' to extract the username from the client certificate. LDAP_LOOKUP: Will use LDAP server to lookup for the username. 
'auth_tls_ldap_server', 'auth_tls_ldap_port', 'auth_tls_ldap_use_ssl', 'auth_tls_ldap_tls_file', 'auth_tls_ldap_bind_dn', 'auth_tls_ldap_bind_password' options are used to establish the connection with the LDAP server. 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' options are used to parse the certificate. 'auth_tls_ldap_search_base', 'auth_tls_ldap_search_filter', and 'auth_tls_ldap_username_attribute' options are used to do the lookup. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_user_lookup", + "output": "auth tls user lookup config.toml: Sets up the way how user identity would be obtained REGEXP_ONLY: Will use 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' to extract the username from the client certificate. LDAP_LOOKUP: Will use LDAP server to lookup for the username. 'auth_tls_ldap_server', 'auth_tls_ldap_port', 'auth_tls_ldap_use_ssl', 'auth_tls_ldap_tls_file', 'auth_tls_ldap_bind_dn', 'auth_tls_ldap_bind_password' options are used to establish the connection with the LDAP server. 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' options are used to parse the certificate. 'auth_tls_ldap_search_base', 'auth_tls_ldap_search_filter', and 'auth_tls_ldap_username_attribute' options are used to do the lookup. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_user_lookup", + "output": "auth tls user lookup config.toml: Sets up the way how user identity would be obtained REGEXP_ONLY: Will use 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' to extract the username from the client certificate. LDAP_LOOKUP: Will use LDAP server to lookup for the username. 
'auth_tls_ldap_server', 'auth_tls_ldap_port', 'auth_tls_ldap_use_ssl', 'auth_tls_ldap_tls_file', 'auth_tls_ldap_bind_dn', 'auth_tls_ldap_bind_password' options are used to establish the connection with the LDAP server. 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' options are used to parse the certificate. 'auth_tls_ldap_search_base', 'auth_tls_ldap_search_filter', and 'auth_tls_ldap_username_attribute' options are used to do the lookup. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls user lookup", + "output": "auth tls user lookup config.toml: Sets up the way how user identity would be obtained REGEXP_ONLY: Will use 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' to extract the username from the client certificate. LDAP_LOOKUP: Will use LDAP server to lookup for the username. 'auth_tls_ldap_server', 'auth_tls_ldap_port', 'auth_tls_ldap_use_ssl', 'auth_tls_ldap_tls_file', 'auth_tls_ldap_bind_dn', 'auth_tls_ldap_bind_password' options are used to establish the connection with the LDAP server. 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' options are used to parse the certificate. 'auth_tls_ldap_search_base', 'auth_tls_ldap_search_filter', and 'auth_tls_ldap_username_attribute' options are used to do the lookup. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls user lookup config.toml: Sets up the way how user identity would be obtained REGEXP_ONLY: Will use 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' to extract the username from the client certificate. LDAP_LOOKUP: Will use LDAP server to lookup for the username. 
'auth_tls_ldap_server', 'auth_tls_ldap_port', 'auth_tls_ldap_use_ssl', 'auth_tls_ldap_tls_file', 'auth_tls_ldap_bind_dn', 'auth_tls_ldap_bind_password' options are used to establish the connection with the LDAP server. 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' options are used to parse the certificate. 'auth_tls_ldap_search_base', 'auth_tls_ldap_search_filter', and 'auth_tls_ldap_username_attribute' options are used to do the lookup. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_user_lookup", + "output": "auth tls user lookup config.toml: Sets up the way how user identity would be obtained REGEXP_ONLY: Will use 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' to extract the username from the client certificate. LDAP_LOOKUP: Will use LDAP server to lookup for the username. 'auth_tls_ldap_server', 'auth_tls_ldap_port', 'auth_tls_ldap_use_ssl', 'auth_tls_ldap_tls_file', 'auth_tls_ldap_bind_dn', 'auth_tls_ldap_bind_password' options are used to establish the connection with the LDAP server. 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' options are used to parse the certificate. 'auth_tls_ldap_search_base', 'auth_tls_ldap_search_filter', and 'auth_tls_ldap_username_attribute' options are used to do the lookup. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_user_lookup", + "output": "auth tls user lookup config.toml: Sets up the way how user identity would be obtained REGEXP_ONLY: Will use 'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' to extract the username from the client certificate. LDAP_LOOKUP: Will use LDAP server to lookup for the username. 'auth_tls_ldap_server', 'auth_tls_ldap_port', 'auth_tls_ldap_use_ssl', 'auth_tls_ldap_tls_file', 'auth_tls_ldap_bind_dn', 'auth_tls_ldap_bind_password' options are used to establish the connection with the LDAP server. 
'auth_tls_subject_field' and 'auth_tls_field_parse_regexp' options are used to parse the certificate. 'auth_tls_ldap_search_base', 'auth_tls_ldap_search_filter', and 'auth_tls_ldap_username_attribute' options are used to do the lookup. " + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_ldap_server do? : auth tls ldap server config.toml: Hostname or IP address of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_ldap_server. : auth tls ldap server config.toml: Hostname or IP address of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_server", + "output": "auth tls ldap server config.toml: Hostname or IP address of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_server", + "output": "auth tls ldap server config.toml: Hostname or IP address of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls ldap server", + "output": "auth tls ldap server config.toml: Hostname or IP address of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls ldap server config.toml: Hostname or IP address of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_ldap_server", + "output": "auth tls ldap server config.toml: Hostname or IP address of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_ldap_server", + "output": "auth tls ldap server config.toml: Hostname or IP address of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_ldap_port do? : auth tls ldap port config.toml: Port of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_ldap_port. : auth tls ldap port config.toml: Port of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_port", + "output": "auth tls ldap port config.toml: Port of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_port", + "output": "auth tls ldap port config.toml: Port of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls ldap port", + "output": "auth tls ldap port config.toml: Port of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls ldap port config.toml: Port of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_ldap_port", + "output": "auth tls ldap port config.toml: Port of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_ldap_port", + "output": "auth tls ldap port config.toml: Port of the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_ldap_use_ssl do? : auth tls ldap use ssl config.toml: Whether to SSL to when connecting to the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_ldap_use_ssl. : auth tls ldap use ssl config.toml: Whether to SSL to when connecting to the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_use_ssl", + "output": "auth tls ldap use ssl config.toml: Whether to SSL to when connecting to the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_use_ssl", + "output": "auth tls ldap use ssl config.toml: Whether to SSL to when connecting to the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls ldap use ssl", + "output": "auth tls ldap use ssl config.toml: Whether to use SSL when connecting to the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls ldap use ssl config.toml: Whether to use SSL when connecting to the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_ldap_use_ssl", + "output": "auth tls ldap use ssl config.toml: Whether to use SSL when connecting to the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_ldap_use_ssl", + "output": "auth tls ldap use ssl config.toml: Whether to use SSL when connecting to the LDAP server used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_ldap_tls_file do? : auth tls ldap tls file config.toml: Path to the SSL certificate used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_ldap_tls_file. : auth tls ldap tls file config.toml: Path to the SSL certificate used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_tls_file", + "output": "auth tls ldap tls file config.toml: Path to the SSL certificate used with LDAP_LOOKUP with 'tls_certificate' authentication method."
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_tls_file", + "output": "auth tls ldap tls file config.toml: Path to the SSL certificate used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls ldap tls file", + "output": "auth tls ldap tls file config.toml: Path to the SSL certificate used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls ldap tls file config.toml: Path to the SSL certificate used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_ldap_tls_file", + "output": "auth tls ldap tls file config.toml: Path to the SSL certificate used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_ldap_tls_file", + "output": "auth tls ldap tls file config.toml: Path to the SSL certificate used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_ldap_bind_dn do? : auth tls ldap bind dn config.toml: Complete DN of the LDAP bind user used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_ldap_bind_dn. : auth tls ldap bind dn config.toml: Complete DN of the LDAP bind user used with LDAP_LOOKUP with 'tls_certificate' authentication method." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_bind_dn", + "output": "auth tls ldap bind dn config.toml: Complete DN of the LDAP bind user used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_bind_dn", + "output": "auth tls ldap bind dn config.toml: Complete DN of the LDAP bind user used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls ldap bind dn", + "output": "auth tls ldap bind dn config.toml: Complete DN of the LDAP bind user used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls ldap bind dn config.toml: Complete DN of the LDAP bind user used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_ldap_bind_dn", + "output": "auth tls ldap bind dn config.toml: Complete DN of the LDAP bind user used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_ldap_bind_dn", + "output": "auth tls ldap bind dn config.toml: Complete DN of the LDAP bind user used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_ldap_bind_password do? : auth tls ldap bind password config.toml: Password for the LDAP bind used with LDAP_LOOKUP with 'tls_certificate' authentication method." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_ldap_bind_password. : auth tls ldap bind password config.toml: Password for the LDAP bind used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_bind_password", + "output": "auth tls ldap bind password config.toml: Password for the LDAP bind used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_bind_password", + "output": "auth tls ldap bind password config.toml: Password for the LDAP bind used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls ldap bind password", + "output": "auth tls ldap bind password config.toml: Password for the LDAP bind used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls ldap bind password config.toml: Password for the LDAP bind used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_ldap_bind_password", + "output": "auth tls ldap bind password config.toml: Password for the LDAP bind used with LDAP_LOOKUP with 'tls_certificate' authentication method." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_ldap_bind_password", + "output": "auth tls ldap bind password config.toml: Password for the LDAP bind used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_ldap_search_base do? : auth tls ldap search base config.toml: Location in the DIT where the search will start used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_ldap_search_base. : auth tls ldap search base config.toml: Location in the DIT where the search will start used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_search_base", + "output": "auth tls ldap search base config.toml: Location in the DIT where the search will start used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_search_base", + "output": "auth tls ldap search base config.toml: Location in the DIT where the search will start used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls ldap search base", + "output": "auth tls ldap search base config.toml: Location in the DIT where the search will start used with LDAP_LOOKUP with 'tls_certificate' authentication method." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls ldap search base config.toml: Location in the DIT where the search will start used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_ldap_search_base", + "output": "auth tls ldap search base config.toml: Location in the DIT where the search will start used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_ldap_search_base", + "output": "auth tls ldap search base config.toml: Location in the DIT where the search will start used with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_ldap_search_filter do? : auth tls ldap search filter config.toml: LDAP filter that will be used to lookup for the user with LDAP_LOOKUP with 'tls_certificate' authentication method. Can be built dynamically using the named capturing groups from the 'auth_tls_field_parse_regexp' for substitution. Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``auth_tls_ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_ldap_search_filter. : auth tls ldap search filter config.toml: LDAP filter that will be used to lookup for the user with LDAP_LOOKUP with 'tls_certificate' authentication method. Can be built dynamically using the named capturing groups from the 'auth_tls_field_parse_regexp' for substitution. 
Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``auth_tls_ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_search_filter", + "output": "auth tls ldap search filter config.toml: LDAP filter that will be used to lookup for the user with LDAP_LOOKUP with 'tls_certificate' authentication method. Can be built dynamically using the named capturing groups from the 'auth_tls_field_parse_regexp' for substitution. Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``auth_tls_ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_search_filter", + "output": "auth tls ldap search filter config.toml: LDAP filter that will be used to lookup for the user with LDAP_LOOKUP with 'tls_certificate' authentication method. Can be built dynamically using the named capturing groups from the 'auth_tls_field_parse_regexp' for substitution. Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``auth_tls_ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls ldap search filter", + "output": "auth tls ldap search filter config.toml: LDAP filter that will be used to lookup for the user with LDAP_LOOKUP with 'tls_certificate' authentication method. Can be built dynamically using the named capturing groups from the 'auth_tls_field_parse_regexp' for substitution. 
Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``auth_tls_ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls ldap search filter config.toml: LDAP filter that will be used to lookup for the user with LDAP_LOOKUP with 'tls_certificate' authentication method. Can be built dynamically using the named capturing groups from the 'auth_tls_field_parse_regexp' for substitution. Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``auth_tls_ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_ldap_search_filter", + "output": "auth tls ldap search filter config.toml: LDAP filter that will be used to lookup for the user with LDAP_LOOKUP with 'tls_certificate' authentication method. Can be built dynamically using the named capturing groups from the 'auth_tls_field_parse_regexp' for substitution. Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``auth_tls_ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_ldap_search_filter", + "output": "auth tls ldap search filter config.toml: LDAP filter that will be used to lookup for the user with LDAP_LOOKUP with 'tls_certificate' authentication method. Can be built dynamically using the named capturing groups from the 'auth_tls_field_parse_regexp' for substitution. Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``auth_tls_ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` " + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_ldap_username_attribute do? 
: auth tls ldap username attribute config.toml: Specifies what LDAP record attribute will be used as username with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_ldap_username_attribute. : auth tls ldap username attribute config.toml: Specifies what LDAP record attribute will be used as username with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_username_attribute", + "output": "auth tls ldap username attribute config.toml: Specifies what LDAP record attribute will be used as username with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_username_attribute", + "output": "auth tls ldap username attribute config.toml: Specifies what LDAP record attribute will be used as username with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls ldap username attribute", + "output": "auth tls ldap username attribute config.toml: Specifies what LDAP record attribute will be used as username with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls ldap username attribute config.toml: Specifies what LDAP record attribute will be used as username with LDAP_LOOKUP with 'tls_certificate' authentication method."
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_ldap_username_attribute", + "output": "auth tls ldap username attribute config.toml: Specifies what LDAP record attribute will be used as username with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_ldap_username_attribute", + "output": "auth tls ldap username attribute config.toml: Specifies what LDAP record attribute will be used as username with LDAP_LOOKUP with 'tls_certificate' authentication method." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_ldap_authorization_lookup_filter do? : auth tls ldap authorization lookup filter config.toml: Sets optional additional lookup filter that is performed after the user is found. This can be used for example to check whether the user is a member of a particular group. Filter can be built dynamically from the attributes returned by the lookup. Authorization fails when search does not return any entry. If one or more entries are returned authorization succeeds. Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P<id>\\d+)\"`` ``ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` ``auth_tls_ldap_authorization_lookup_filter=\"(&(objectClass=group)(member=uid={{uid}},dc=example,dc=com))\"`` If this option is empty no additional lookup is done and just a successful user lookup is enough to authorize the user. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_ldap_authorization_lookup_filter. : auth tls ldap authorization lookup filter config.toml: Sets optional additional lookup filter that is performed after the user is found. This can be used for example to check whether the user is a member of a particular group. Filter can be built dynamically from the attributes returned by the lookup. Authorization fails when search does not return any entry.
If one ore more entries are returned authorization succeeds. Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` ``auth_tls_ldap_authorization_lookup_filter=\"(&(objectClass=group)(member=uid={{uid}},dc=example,dc=com))\"`` If this option is empty no additional lookup is done and just a successful user lookup is enough to authorize the user. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_authorization_lookup_filter", + "output": "auth tls ldap authorization lookup filter config.toml: Sets optional additional lookup filter that is performed after the user is found. This can be used for example to check whether the is member of particular group. Filter can be built dynamically from the attributes returned by the lookup. Authorization fails when search does not return any entry. If one ore more entries are returned authorization succeeds. Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` ``auth_tls_ldap_authorization_lookup_filter=\"(&(objectClass=group)(member=uid={{uid}},dc=example,dc=com))\"`` If this option is empty no additional lookup is done and just a successful user lookup is enough to authorize the user. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_authorization_lookup_filter", + "output": "auth tls ldap authorization lookup filter config.toml: Sets optional additional lookup filter that is performed after the user is found. This can be used for example to check whether the is member of particular group. Filter can be built dynamically from the attributes returned by the lookup. Authorization fails when search does not return any entry. If one ore more entries are returned authorization succeeds. 
Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` ``auth_tls_ldap_authorization_lookup_filter=\"(&(objectClass=group)(member=uid={{uid}},dc=example,dc=com))\"`` If this option is empty no additional lookup is done and just a successful user lookup is enough to authorize the user. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls ldap authorization lookup filter", + "output": "auth tls ldap authorization lookup filter config.toml: Sets optional additional lookup filter that is performed after the user is found. This can be used for example to check whether the is member of particular group. Filter can be built dynamically from the attributes returned by the lookup. Authorization fails when search does not return any entry. If one ore more entries are returned authorization succeeds. Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` ``auth_tls_ldap_authorization_lookup_filter=\"(&(objectClass=group)(member=uid={{uid}},dc=example,dc=com))\"`` If this option is empty no additional lookup is done and just a successful user lookup is enough to authorize the user. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls ldap authorization lookup filter config.toml: Sets optional additional lookup filter that is performed after the user is found. This can be used for example to check whether the is member of particular group. Filter can be built dynamically from the attributes returned by the lookup. Authorization fails when search does not return any entry. If one ore more entries are returned authorization succeeds. 
Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` ``auth_tls_ldap_authorization_lookup_filter=\"(&(objectClass=group)(member=uid={{uid}},dc=example,dc=com))\"`` If this option is empty no additional lookup is done and just a successful user lookup is enough to authorize the user. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_ldap_authorization_lookup_filter", + "output": "auth tls ldap authorization lookup filter config.toml: Sets optional additional lookup filter that is performed after the user is found. This can be used for example to check whether the is member of particular group. Filter can be built dynamically from the attributes returned by the lookup. Authorization fails when search does not return any entry. If one ore more entries are returned authorization succeeds. Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` ``auth_tls_ldap_authorization_lookup_filter=\"(&(objectClass=group)(member=uid={{uid}},dc=example,dc=com))\"`` If this option is empty no additional lookup is done and just a successful user lookup is enough to authorize the user. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_ldap_authorization_lookup_filter", + "output": "auth tls ldap authorization lookup filter config.toml: Sets optional additional lookup filter that is performed after the user is found. This can be used for example to check whether the is member of particular group. Filter can be built dynamically from the attributes returned by the lookup. Authorization fails when search does not return any entry. If one ore more entries are returned authorization succeeds. 
Example: ``auth_tls_field_parse_regexp=\"\\w+ (?P\\d+)\"`` ``ldap_search_filter=\"(&(objectClass=person)(id={{id}}))\"`` ``auth_tls_ldap_authorization_lookup_filter=\"(&(objectClass=group)(member=uid={{uid}},dc=example,dc=com))\"`` If this option is empty no additional lookup is done and just a successful user lookup is enough to authorize the user. " + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_tls_ldap_authorization_search_base do? : auth tls ldap authorization search base config.toml: Base DN where to start the Authorization lookup. Used when 'auth_tls_ldap_authorization_lookup_filter' is set." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_tls_ldap_authorization_search_base. : auth tls ldap authorization search base config.toml: Base DN where to start the Authorization lookup. Used when 'auth_tls_ldap_authorization_lookup_filter' is set." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_authorization_search_base", + "output": "auth tls ldap authorization search base config.toml: Base DN where to start the Authorization lookup. Used when 'auth_tls_ldap_authorization_lookup_filter' is set." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_tls_ldap_authorization_search_base", + "output": "auth tls ldap authorization search base config.toml: Base DN where to start the Authorization lookup. Used when 'auth_tls_ldap_authorization_lookup_filter' is set." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth tls ldap authorization search base", + "output": "auth tls ldap authorization search base config.toml: Base DN where to start the Authorization lookup. Used when 'auth_tls_ldap_authorization_lookup_filter' is set." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth tls ldap authorization search base config.toml: Base DN where to start the Authorization lookup. Used when 'auth_tls_ldap_authorization_lookup_filter' is set." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_tls_ldap_authorization_search_base", + "output": "auth tls ldap authorization search base config.toml: Base DN where to start the Authorization lookup. Used when 'auth_tls_ldap_authorization_lookup_filter' is set." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_tls_ldap_authorization_search_base", + "output": "auth tls ldap authorization search base config.toml: Base DN where to start the Authorization lookup. Used when 'auth_tls_ldap_authorization_lookup_filter' is set." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_jwt_token_source do? : auth jwt token source config.toml: Sets up the way how the token will picked from the request COOKIE: Will use 'auth_jwt_cookie_name' cookie content parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. HEADER: Will use 'auth_jwt_header_name' header value parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_jwt_token_source. : auth jwt token source config.toml: Sets up the way how the token will picked from the request COOKIE: Will use 'auth_jwt_cookie_name' cookie content parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. HEADER: Will use 'auth_jwt_header_name' header value parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_token_source", + "output": "auth jwt token source config.toml: Sets up the way how the token will picked from the request COOKIE: Will use 'auth_jwt_cookie_name' cookie content parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. HEADER: Will use 'auth_jwt_header_name' header value parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_token_source", + "output": "auth jwt token source config.toml: Sets up the way how the token will picked from the request COOKIE: Will use 'auth_jwt_cookie_name' cookie content parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. HEADER: Will use 'auth_jwt_header_name' header value parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth jwt token source", + "output": "auth jwt token source config.toml: Sets up the way how the token will picked from the request COOKIE: Will use 'auth_jwt_cookie_name' cookie content parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. HEADER: Will use 'auth_jwt_header_name' header value parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth jwt token source config.toml: Sets up the way how the token will picked from the request COOKIE: Will use 'auth_jwt_cookie_name' cookie content parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. 
HEADER: Will use 'auth_jwt_header_name' header value parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_jwt_token_source", + "output": "auth jwt token source config.toml: Sets up the way how the token will picked from the request COOKIE: Will use 'auth_jwt_cookie_name' cookie content parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. HEADER: Will use 'auth_jwt_header_name' header value parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_jwt_token_source", + "output": "auth jwt token source config.toml: Sets up the way how the token will picked from the request COOKIE: Will use 'auth_jwt_cookie_name' cookie content parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. HEADER: Will use 'auth_jwt_header_name' header value parsed with 'auth_jwt_source_parse_regexp' to obtain the token content. " + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_jwt_cookie_name do? : auth jwt cookie name config.toml: Specifies name of the cookie that will be used to obtain JWT." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_jwt_cookie_name. : auth jwt cookie name config.toml: Specifies name of the cookie that will be used to obtain JWT." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_cookie_name", + "output": "auth jwt cookie name config.toml: Specifies name of the cookie that will be used to obtain JWT." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_cookie_name", + "output": "auth jwt cookie name config.toml: Specifies name of the cookie that will be used to obtain JWT." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth jwt cookie name", + "output": "auth jwt cookie name config.toml: Specifies name of the cookie that will be used to obtain JWT." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth jwt cookie name config.toml: Specifies name of the cookie that will be used to obtain JWT." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_jwt_cookie_name", + "output": "auth jwt cookie name config.toml: Specifies name of the cookie that will be used to obtain JWT." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_jwt_cookie_name", + "output": "auth jwt cookie name config.toml: Specifies name of the cookie that will be used to obtain JWT." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_jwt_header_name do? : auth jwt header name config.toml: Specifies name http header that will be used to obtain JWT" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_jwt_header_name. 
: auth jwt header name config.toml: Specifies name of the http header that will be used to obtain JWT" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_header_name", + "output": "auth jwt header name config.toml: Specifies name of the http header that will be used to obtain JWT" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_header_name", + "output": "auth jwt header name config.toml: Specifies name of the http header that will be used to obtain JWT" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth jwt header name", + "output": "auth jwt header name config.toml: Specifies name of the http header that will be used to obtain JWT" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth jwt header name config.toml: Specifies name of the http header that will be used to obtain JWT" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_jwt_header_name", + "output": "auth jwt header name config.toml: Specifies name of the http header that will be used to obtain JWT" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_jwt_header_name", + "output": "auth jwt header name config.toml: Specifies name of the http header that will be used to obtain JWT" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_jwt_source_parse_regexp do? : auth jwt source parse regexp config.toml: Regular expression that will be used to parse JWT source. Expression is in Python syntax and must contain named group 'token' with capturing the token value." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_jwt_source_parse_regexp. 
: auth jwt source parse regexp config.toml: Regular expression that will be used to parse JWT source. Expression is in Python syntax and must contain named group 'token' with capturing the token value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_source_parse_regexp", + "output": "auth jwt source parse regexp config.toml: Regular expression that will be used to parse JWT source. Expression is in Python syntax and must contain named group 'token' with capturing the token value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_source_parse_regexp", + "output": "auth jwt source parse regexp config.toml: Regular expression that will be used to parse JWT source. Expression is in Python syntax and must contain named group 'token' with capturing the token value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth jwt source parse regexp", + "output": "auth jwt source parse regexp config.toml: Regular expression that will be used to parse JWT source. Expression is in Python syntax and must contain named group 'token' with capturing the token value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth jwt source parse regexp config.toml: Regular expression that will be used to parse JWT source. Expression is in Python syntax and must contain named group 'token' with capturing the token value." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_jwt_source_parse_regexp", + "output": "auth jwt source parse regexp config.toml: Regular expression that will be used to parse JWT source. 
Expression is in Python syntax and must contain named group 'token' with capturing the token value." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_jwt_source_parse_regexp", + "output": "auth jwt source parse regexp config.toml: Regular expression that will be used to parse JWT source. Expression is in Python syntax and must contain named group 'token' with capturing the token value." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_jwt_username_claim_name do? : auth jwt username claim name config.toml: Which JWT claim will be used as username for Driverless." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_jwt_username_claim_name. : auth jwt username claim name config.toml: Which JWT claim will be used as username for Driverless." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_username_claim_name", + "output": "auth jwt username claim name config.toml: Which JWT claim will be used as username for Driverless." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_username_claim_name", + "output": "auth jwt username claim name config.toml: Which JWT claim will be used as username for Driverless." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth jwt username claim name", + "output": "auth jwt username claim name config.toml: Which JWT claim will be used as username for Driverless." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth jwt username claim name config.toml: Which JWT claim will be used as username for Driverless." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_jwt_username_claim_name", + "output": "auth jwt username claim name config.toml: Which JWT claim will be used as username for Driverless." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_jwt_username_claim_name", + "output": "auth jwt username claim name config.toml: Which JWT claim will be used as username for Driverless." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_jwt_verify do? : auth jwt verify config.toml: Whether to verify the signature of the JWT." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_jwt_verify. : auth jwt verify config.toml: Whether to verify the signature of the JWT." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_verify", + "output": "auth jwt verify config.toml: Whether to verify the signature of the JWT." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_verify", + "output": "auth jwt verify config.toml: Whether to verify the signature of the JWT." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth jwt verify", + "output": "auth jwt verify config.toml: Whether to verify the signature of the JWT." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth jwt verify config.toml: Whether to verify the signature of the JWT." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_jwt_verify", + "output": "auth jwt verify config.toml: Whether to verify the signature of the JWT." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_jwt_verify", + "output": "auth jwt verify config.toml: Whether to verify the signature of the JWT." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_jwt_algorithm do? : auth jwt algorithm config.toml: Signature algorithm that will be used to verify the signature according to RFC 7518." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_jwt_algorithm. : auth jwt algorithm config.toml: Signature algorithm that will be used to verify the signature according to RFC 7518." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_algorithm", + "output": "auth jwt algorithm config.toml: Signature algorithm that will be used to verify the signature according to RFC 7518." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_algorithm", + "output": "auth jwt algorithm config.toml: Signature algorithm that will be used to verify the signature according to RFC 7518." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth jwt algorithm", + "output": "auth jwt algorithm config.toml: Signature algorithm that will be used to verify the signature according to RFC 7518." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth jwt algorithm config.toml: Signature algorithm that will be used to verify the signature according to RFC 7518." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_jwt_algorithm", + "output": "auth jwt algorithm config.toml: Signature algorithm that will be used to verify the signature according to RFC 7518." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_jwt_algorithm", + "output": "auth jwt algorithm config.toml: Signature algorithm that will be used to verify the signature according to RFC 7518." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_jwt_secret do? : auth jwt secret config.toml: Specifies the secret content for HMAC or public key for RSA and DSA signature algorithms." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_jwt_secret. : auth jwt secret config.toml: Specifies the secret content for HMAC or public key for RSA and DSA signature algorithms." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_secret", + "output": "auth jwt secret config.toml: Specifies the secret content for HMAC or public key for RSA and DSA signature algorithms." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_secret", + "output": "auth jwt secret config.toml: Specifies the secret content for HMAC or public key for RSA and DSA signature algorithms." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth jwt secret", + "output": "auth jwt secret config.toml: Specifies the secret content for HMAC or public key for RSA and DSA signature algorithms." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth jwt secret config.toml: Specifies the secret content for HMAC or public key for RSA and DSA signature algorithms." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_jwt_secret", + "output": "auth jwt secret config.toml: Specifies the secret content for HMAC or public key for RSA and DSA signature algorithms." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_jwt_secret", + "output": "auth jwt secret config.toml: Specifies the secret content for HMAC or public key for RSA and DSA signature algorithms." + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_jwt_exp_leeway_seconds do? : auth jwt exp leeway seconds config.toml: Number of seconds after JWT still can be accepted if when already expired" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_jwt_exp_leeway_seconds. : auth jwt exp leeway seconds config.toml: Number of seconds after JWT still can be accepted if when already expired" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_exp_leeway_seconds", + "output": "auth jwt exp leeway seconds config.toml: Number of seconds after JWT still can be accepted if when already expired" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_exp_leeway_seconds", + "output": "auth jwt exp leeway seconds config.toml: Number of seconds after JWT still can be accepted if when already expired" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth jwt exp leeway seconds", + "output": "auth jwt exp leeway seconds config.toml: Number of seconds after JWT still can be accepted if when already expired" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth jwt exp leeway seconds config.toml: Number of seconds 
after JWT still can be accepted even when already expired" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_jwt_exp_leeway_seconds", + "output": "auth jwt exp leeway seconds config.toml: Number of seconds after JWT still can be accepted even when already expired" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_jwt_exp_leeway_seconds", + "output": "auth jwt exp leeway seconds config.toml: Number of seconds after JWT still can be accepted even when already expired" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_jwt_required_audience do? : auth jwt required audience config.toml: List of accepted 'aud' claims for the JWTs. When empty, any audience is accepted" + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_jwt_required_audience. : auth jwt required audience config.toml: List of accepted 'aud' claims for the JWTs. When empty, any audience is accepted" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_required_audience", + "output": "auth jwt required audience config.toml: List of accepted 'aud' claims for the JWTs. When empty, any audience is accepted" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_required_audience", + "output": "auth jwt required audience config.toml: List of accepted 'aud' claims for the JWTs. When empty, any audience is accepted" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth jwt required audience", + "output": "auth jwt required audience config.toml: List of accepted 'aud' claims for the JWTs. 
When empty, any audience is accepted" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth jwt required audience config.toml: List of accepted 'aud' claims for the JWTs. When empty, any audience is accepted" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_jwt_required_audience", + "output": "auth jwt required audience config.toml: List of accepted 'aud' claims for the JWTs. When empty, any audience is accepted" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_jwt_required_audience", + "output": "auth jwt required audience config.toml: List of accepted 'aud' claims for the JWTs. When empty, any audience is accepted" + }, + { + "prompt_type": "plain", + "instruction": ": What does auth_jwt_required_issuer do? : auth jwt required issuer config.toml: Value of the 'iss' claim that JWTs need to have in order to be accepted." + }, + { + "prompt_type": "plain", + "instruction": ": Explain auth_jwt_required_issuer. : auth jwt required issuer config.toml: Value of the 'iss' claim that JWTs need to have in order to be accepted." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_required_issuer", + "output": "auth jwt required issuer config.toml: Value of the 'iss' claim that JWTs need to have in order to be accepted." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth_jwt_required_issuer", + "output": "auth jwt required issuer config.toml: Value of the 'iss' claim that JWTs need to have in order to be accepted." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "auth jwt required issuer", + "output": "auth jwt required issuer config.toml: Value of the 'iss' claim that JWTs need to have in order to be accepted." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "auth jwt required issuer config.toml: Value of the 'iss' claim that JWTs need to have in order to be accepted." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting auth_jwt_required_issuer", + "output": "auth jwt required issuer config.toml: Value of the 'iss' claim that JWTs need to have in order to be accepted." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting auth_jwt_required_issuer", + "output": "auth jwt required issuer config.toml: Value of the 'iss' claim that JWTs need to have in order to be accepted." + }, + { + "prompt_type": "plain", + "instruction": ": What does local_htpasswd_file do? : local htpasswd file config.toml: Local password file Generating a htpasswd file: see syntax below ``htpasswd -B '' ''`` note: -B forces use of brcypt, a secure encryption method" + }, + { + "prompt_type": "plain", + "instruction": ": Explain local_htpasswd_file. 
: local htpasswd file config.toml: Local password file Generating a htpasswd file: see syntax below ``htpasswd -B '' ''`` note: -B forces use of bcrypt, a secure encryption method" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "local_htpasswd_file", + "output": "local htpasswd file config.toml: Local password file Generating a htpasswd file: see syntax below ``htpasswd -B '' ''`` note: -B forces use of bcrypt, a secure encryption method" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "local_htpasswd_file", + "output": "local htpasswd file config.toml: Local password file Generating a htpasswd file: see syntax below ``htpasswd -B '' ''`` note: -B forces use of bcrypt, a secure encryption method" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "local htpasswd file", + "output": "local htpasswd file config.toml: Local password file Generating a htpasswd file: see syntax below ``htpasswd -B '' ''`` note: -B forces use of bcrypt, a secure encryption method" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "local htpasswd file config.toml: Local password file Generating a htpasswd file: see syntax below ``htpasswd -B '' ''`` note: -B forces use of bcrypt, a secure encryption method" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting local_htpasswd_file", + "output": "local htpasswd file config.toml: Local password file Generating a htpasswd file: see syntax below ``htpasswd -B '' ''`` note: -B forces use of bcrypt, a secure encryption method" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting local_htpasswd_file", + "output": "local 
htpasswd file config.toml: Local password file Generating a htpasswd file: see syntax below ``htpasswd -B '' ''`` note: -B forces use of bcrypt, a secure encryption method" + }, + { + "prompt_type": "plain", + "instruction": ": What does authorization_service do? : authorization service config.toml: Authorization service name: * local: Authorization is based on config.toml settings such as `local_administrator_list`" + }, + { + "prompt_type": "plain", + "instruction": ": Explain authorization_service. : authorization service config.toml: Authorization service name: * local: Authorization is based on config.toml settings such as `local_administrator_list`" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Authorization service name: . : Set the authorization service config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "authorization_service", + "output": "authorization service config.toml: Authorization service name: * local: Authorization is based on config.toml settings such as `local_administrator_list`" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "authorization_service", + "output": "authorization service config.toml: Authorization service name: Authorization service name: * local: Authorization is based on config.toml settings such as `local_administrator_list`" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "authorization service", + "output": "authorization service config.toml: Authorization service name: Authorization service name: * local: Authorization is based on config.toml settings such as `local_administrator_list`" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Authorization service name: ", + "output": 
"authorization service config.toml: Authorization service name: Authorization service name: * local: Authorization is based on config.toml settings such as `local_administrator_list`" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting authorization_service", + "output": "authorization service config.toml: Authorization service name: * local: Authorization is based on config.toml settings such as `local_administrator_list`" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting authorization_service", + "output": "authorization service config.toml: Authorization service name: Authorization service name: * local: Authorization is based on config.toml settings such as `local_administrator_list`" + }, + { + "prompt_type": "plain", + "instruction": ": What does local_administrator_list do? : local administrator list config.toml: List of usernames with admin rights: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain local_administrator_list. 
: local administrator list config.toml: List of usernames with admin rights: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "local_administrator_list", + "output": "local administrator list config.toml: List of usernames with admin rights: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "local_administrator_list", + "output": "local administrator list config.toml: List of usernames with admin rights: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "local administrator list", + "output": "local administrator list config.toml: List of usernames with admin rights: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "List of usernames with admin rights: ", + "output": "local administrator list config.toml: List of usernames with admin rights: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting local_administrator_list", + "output": "local administrator list config.toml: List of usernames with admin rights: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting local_administrator_list", + "output": "local administrator list config.toml: List of usernames with admin rights: " + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_report_name do? : autodoc report name config.toml: Specify the name of the report." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_report_name. : autodoc report name config.toml: Specify the name of the report." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: AutoDoc Name: . 
: Set the autodoc report name config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_report_name", + "output": "autodoc report name config.toml: Specify the name of the report." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_report_name", + "output": "autodoc report name config.toml: AutoDoc Name: Specify the name of the report." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc report name", + "output": "autodoc report name config.toml: AutoDoc Name: Specify the name of the report." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "AutoDoc Name: ", + "output": "autodoc report name config.toml: AutoDoc Name: Specify the name of the report." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_report_name", + "output": "autodoc report name config.toml: Specify the name of the report." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_report_name", + "output": "autodoc report name config.toml: AutoDoc Name: Specify the name of the report." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_template do? : autodoc template config.toml: AutoDoc template path. Provide the full path to your custom AutoDoc template or leave as 'default' to generate the standard AutoDoc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_template. : autodoc template config.toml: AutoDoc template path. Provide the full path to your custom AutoDoc template or leave as 'default' to generate the standard AutoDoc." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: AutoDoc Template Location: . : Set the autodoc template config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_template", + "output": "autodoc template config.toml: AutoDoc template path. Provide the full path to your custom AutoDoc template or leave as 'default'to generate the standard AutoDoc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_template", + "output": "autodoc template config.toml: AutoDoc Template Location: AutoDoc template path. Provide the full path to your custom AutoDoc template or leave as 'default'to generate the standard AutoDoc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc template", + "output": "autodoc template config.toml: AutoDoc Template Location: AutoDoc template path. Provide the full path to your custom AutoDoc template or leave as 'default'to generate the standard AutoDoc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "AutoDoc Template Location: ", + "output": "autodoc template config.toml: AutoDoc Template Location: AutoDoc template path. Provide the full path to your custom AutoDoc template or leave as 'default'to generate the standard AutoDoc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_template", + "output": "autodoc template config.toml: AutoDoc template path. Provide the full path to your custom AutoDoc template or leave as 'default'to generate the standard AutoDoc." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_template", + "output": "autodoc template config.toml: AutoDoc Template Location: AutoDoc template path. Provide the full path to your custom AutoDoc template or leave as 'default'to generate the standard AutoDoc." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_additional_template_folder do? : autodoc additional template folder config.toml: Location of the additional AutoDoc templates" + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_additional_template_folder. : autodoc additional template folder config.toml: Location of the additional AutoDoc templates" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_additional_template_folder", + "output": "autodoc additional template folder config.toml: Location of the additional AutoDoc templates" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_additional_template_folder", + "output": "autodoc additional template folder config.toml: Location of the additional AutoDoc templates" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc additional template folder", + "output": "autodoc additional template folder config.toml: Location of the additional AutoDoc templates" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "autodoc additional template folder config.toml: Location of the additional AutoDoc templates" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_additional_template_folder", + "output": "autodoc additional template folder config.toml: Location of the 
additional AutoDoc templates" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_additional_template_folder", + "output": "autodoc additional template folder config.toml: Location of the additional AutoDoc templates" + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_output_type do? : autodoc output type config.toml: Specify the AutoDoc output type." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_output_type. : autodoc output type config.toml: Specify the AutoDoc output type." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: AutoDoc File Output Type: . : Set the autodoc output type config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_output_type", + "output": "autodoc output type config.toml: Specify the AutoDoc output type." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_output_type", + "output": "autodoc output type config.toml: AutoDoc File Output Type: Specify the AutoDoc output type." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc output type", + "output": "autodoc output type config.toml: AutoDoc File Output Type: Specify the AutoDoc output type." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "AutoDoc File Output Type: ", + "output": "autodoc output type config.toml: AutoDoc File Output Type: Specify the AutoDoc output type." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_output_type", + "output": "autodoc output type config.toml: Specify the AutoDoc output type." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_output_type", + "output": "autodoc output type config.toml: AutoDoc File Output Type: Specify the AutoDoc output type." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_subtemplate_type do? : autodoc subtemplate type config.toml: Specify the type of sub-templates to use. Options are 'auto', 'docx' or 'md'." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_subtemplate_type. : autodoc subtemplate type config.toml: Specify the type of sub-templates to use. Options are 'auto', 'docx' or 'md'." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: AutoDoc SubTemplate Type: . : Set the autodoc subtemplate type config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_subtemplate_type", + "output": "autodoc subtemplate type config.toml: Specify the type of sub-templates to use. Options are 'auto', 'docx' or 'md'." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_subtemplate_type", + "output": "autodoc subtemplate type config.toml: AutoDoc SubTemplate Type: Specify the type of sub-templates to use. Options are 'auto', 'docx' or 'md'." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc subtemplate type", + "output": "autodoc subtemplate type config.toml: AutoDoc SubTemplate Type: Specify the type of sub-templates to use. Options are 'auto', 'docx' or 'md'." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "AutoDoc SubTemplate Type: ", + "output": "autodoc subtemplate type config.toml: AutoDoc SubTemplate Type: Specify the type of sub-templates to use. 
Options are 'auto', 'docx' or 'md'." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_subtemplate_type", + "output": "autodoc subtemplate type config.toml: Specify the type of sub-templates to use. Options are 'auto', 'docx' or 'md'." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_subtemplate_type", + "output": "autodoc subtemplate type config.toml: AutoDoc SubTemplate Type: Specify the type of sub-templates to use. Options are 'auto', 'docx' or 'md'." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_max_cm_size do? : autodoc max cm size config.toml: Specify the maximum number of classes in the confusion matrix." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_max_cm_size. : autodoc max cm size config.toml: Specify the maximum number of classes in the confusion matrix." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Confusion Matrix Max Number of Classes: . : Set the autodoc max cm size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_max_cm_size", + "output": "autodoc max cm size config.toml: Specify the maximum number of classes in the confusion matrix." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_max_cm_size", + "output": "autodoc max cm size config.toml: Confusion Matrix Max Number of Classes: Specify the maximum number of classes in the confusion matrix." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc max cm size", + "output": "autodoc max cm size config.toml: Confusion Matrix Max Number of Classes: Specify the maximum number of classes in the confusion matrix." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Confusion Matrix Max Number of Classes: ", + "output": "autodoc max cm size config.toml: Confusion Matrix Max Number of Classes: Specify the maximum number of classes in the confusion matrix." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_max_cm_size", + "output": "autodoc max cm size config.toml: Specify the maximum number of classes in the confusion matrix." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_max_cm_size", + "output": "autodoc max cm size config.toml: Confusion Matrix Max Number of Classes: Specify the maximum number of classes in the confusion matrix." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_num_features do? : autodoc num features config.toml: Specify the number of top features to display in the document. setting to -1 disables this restriction." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_num_features. : autodoc num features config.toml: Specify the number of top features to display in the document. setting to -1 disables this restriction." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of Top Features to Document: . : Set the autodoc num features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_num_features", + "output": "autodoc num features config.toml: Specify the number of top features to display in the document. setting to -1 disables this restriction." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_num_features", + "output": "autodoc num features config.toml: Number of Top Features to Document: Specify the number of top features to display in the document. setting to -1 disables this restriction." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc num features", + "output": "autodoc num features config.toml: Number of Top Features to Document: Specify the number of top features to display in the document. setting to -1 disables this restriction." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of Top Features to Document: ", + "output": "autodoc num features config.toml: Number of Top Features to Document: Specify the number of top features to display in the document. setting to -1 disables this restriction." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_num_features", + "output": "autodoc num features config.toml: Specify the number of top features to display in the document. setting to -1 disables this restriction." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_num_features", + "output": "autodoc num features config.toml: Number of Top Features to Document: Specify the number of top features to display in the document. setting to -1 disables this restriction." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_min_relative_importance do? : autodoc min relative importance config.toml: Specify the minimum relative importance in order for a feature to be displayed. autodoc_min_relative_importance must be a float >= 0 and <= 1." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_min_relative_importance. : autodoc min relative importance config.toml: Specify the minimum relative importance in order for a feature to be displayed. autodoc_min_relative_importance must be a float >= 0 and <= 1." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Minimum Relative Feature Importance Threshold: . : Set the autodoc min relative importance config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_min_relative_importance", + "output": "autodoc min relative importance config.toml: Specify the minimum relative importance in order for a feature to be displayed. autodoc_min_relative_importance must be a float >= 0 and <= 1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_min_relative_importance", + "output": "autodoc min relative importance config.toml: Minimum Relative Feature Importance Threshold: Specify the minimum relative importance in order for a feature to be displayed. autodoc_min_relative_importance must be a float >= 0 and <= 1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc min relative importance", + "output": "autodoc min relative importance config.toml: Minimum Relative Feature Importance Threshold: Specify the minimum relative importance in order for a feature to be displayed. autodoc_min_relative_importance must be a float >= 0 and <= 1." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Minimum Relative Feature Importance Threshold: ", + "output": "autodoc min relative importance config.toml: Minimum Relative Feature Importance Threshold: Specify the minimum relative importance in order for a feature to be displayed. autodoc_min_relative_importance must be a float >= 0 and <= 1." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_min_relative_importance", + "output": "autodoc min relative importance config.toml: Specify the minimum relative importance in order for a feature to be displayed. autodoc_min_relative_importance must be a float >= 0 and <= 1." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_min_relative_importance", + "output": "autodoc min relative importance config.toml: Minimum Relative Feature Importance Threshold: Specify the minimum relative importance in order for a feature to be displayed. autodoc_min_relative_importance must be a float >= 0 and <= 1." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_include_permutation_feature_importance do? : autodoc include permutation feature importance config.toml: Whether to compute permutation based feature importance." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_include_permutation_feature_importance. : autodoc include permutation feature importance config.toml: Whether to compute permutation based feature importance." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Permutation Feature Importance: . 
: Set the autodoc include permutation feature importance config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_include_permutation_feature_importance", + "output": "autodoc include permutation feature importance config.toml: Whether to compute permutation based feature importance." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_include_permutation_feature_importance", + "output": "autodoc include permutation feature importance config.toml: Permutation Feature Importance: Whether to compute permutation based feature importance." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc include permutation feature importance", + "output": "autodoc include permutation feature importance config.toml: Permutation Feature Importance: Whether to compute permutation based feature importance." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Permutation Feature Importance: ", + "output": "autodoc include permutation feature importance config.toml: Permutation Feature Importance: Whether to compute permutation based feature importance." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_include_permutation_feature_importance", + "output": "autodoc include permutation feature importance config.toml: Whether to compute permutation based feature importance." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_include_permutation_feature_importance", + "output": "autodoc include permutation feature importance config.toml: Permutation Feature Importance: Whether to compute permutation based feature importance." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_feature_importance_num_perm do? : autodoc feature importance num perm config.toml: Number of permutations to make per feature when computing feature importance." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_feature_importance_num_perm. : autodoc feature importance num perm config.toml: Number of permutations to make per feature when computing feature importance." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of Permutations for Feature Importance: . : Set the autodoc feature importance num perm config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_feature_importance_num_perm", + "output": "autodoc feature importance num perm config.toml: Number of permutations to make per feature when computing feature importance." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_feature_importance_num_perm", + "output": "autodoc feature importance num perm config.toml: Number of Permutations for Feature Importance: Number of permutations to make per feature when computing feature importance." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc feature importance num perm", + "output": "autodoc feature importance num perm config.toml: Number of Permutations for Feature Importance: Number of permutations to make per feature when computing feature importance." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of Permutations for Feature Importance: ", + "output": "autodoc feature importance num perm config.toml: Number of Permutations for Feature Importance: Number of permutations to make per feature when computing feature importance." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_feature_importance_num_perm", + "output": "autodoc feature importance num perm config.toml: Number of permutations to make per feature when computing feature importance." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_feature_importance_num_perm", + "output": "autodoc feature importance num perm config.toml: Number of Permutations for Feature Importance: Number of permutations to make per feature when computing feature importance." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_feature_importance_scorer do? : autodoc feature importance scorer config.toml: Name of the scorer to be used to calculate feature importance. Leave blank to use experiments default scorer." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_feature_importance_scorer. : autodoc feature importance scorer config.toml: Name of the scorer to be used to calculate feature importance. Leave blank to use experiments default scorer." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Feature Importance Scorer: . : Set the autodoc feature importance scorer config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_feature_importance_scorer", + "output": "autodoc feature importance scorer config.toml: Name of the scorer to be used to calculate feature importance. Leave blank to use experiments default scorer." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_feature_importance_scorer", + "output": "autodoc feature importance scorer config.toml: Feature Importance Scorer: Name of the scorer to be used to calculate feature importance. Leave blank to use experiments default scorer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc feature importance scorer", + "output": "autodoc feature importance scorer config.toml: Feature Importance Scorer: Name of the scorer to be used to calculate feature importance. Leave blank to use experiments default scorer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Feature Importance Scorer: ", + "output": "autodoc feature importance scorer config.toml: Feature Importance Scorer: Name of the scorer to be used to calculate feature importance. Leave blank to use experiments default scorer." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_feature_importance_scorer", + "output": "autodoc feature importance scorer config.toml: Name of the scorer to be used to calculate feature importance. Leave blank to use experiments default scorer." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_feature_importance_scorer", + "output": "autodoc feature importance scorer config.toml: Feature Importance Scorer: Name of the scorer to be used to calculate feature importance. Leave blank to use experiments default scorer." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_pd_max_rows do? 
: autodoc pd max rows config.toml: The autodoc_pd_max_rows configuration controls the number of rows shown for the partial dependence plots (PDP) and Shapley values summary plot in the AutoDoc. Random sampling is used for datasets with more than the autodoc_pd_max_rows limit." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_pd_max_rows. : autodoc pd max rows config.toml: The autodoc_pd_max_rows configuration controls the number of rows shown for the partial dependence plots (PDP) and Shapley values summary plot in the AutoDoc. Random sampling is used for datasets with more than the autodoc_pd_max_rows limit." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: PDP and Shapley Summary Plot Max Rows: . : Set the autodoc pd max rows config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_pd_max_rows", + "output": "autodoc pd max rows config.toml: The autodoc_pd_max_rows configuration controls the number of rows shown for the partial dependence plots (PDP) and Shapley values summary plot in the AutoDoc. Random sampling is used for datasets with more than the autodoc_pd_max_rows limit." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_pd_max_rows", + "output": "autodoc pd max rows config.toml: PDP and Shapley Summary Plot Max Rows: The autodoc_pd_max_rows configuration controls the number of rows shown for the partial dependence plots (PDP) and Shapley values summary plot in the AutoDoc. Random sampling is used for datasets with more than the autodoc_pd_max_rows limit." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc pd max rows", + "output": "autodoc pd max rows config.toml: PDP and Shapley Summary Plot Max Rows: The autodoc_pd_max_rows configuration controls the number of rows shown for the partial dependence plots (PDP) and Shapley values summary plot in the AutoDoc. Random sampling is used for datasets with more than the autodoc_pd_max_rows limit." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "PDP and Shapley Summary Plot Max Rows: ", + "output": "autodoc pd max rows config.toml: PDP and Shapley Summary Plot Max Rows: The autodoc_pd_max_rows configuration controls the number of rows shown for the partial dependence plots (PDP) and Shapley values summary plot in the AutoDoc. Random sampling is used for datasets with more than the autodoc_pd_max_rows limit." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_pd_max_rows", + "output": "autodoc pd max rows config.toml: The autodoc_pd_max_rows configuration controls the number of rows shown for the partial dependence plots (PDP) and Shapley values summary plot in the AutoDoc. Random sampling is used for datasets with more than the autodoc_pd_max_rows limit." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_pd_max_rows", + "output": "autodoc pd max rows config.toml: PDP and Shapley Summary Plot Max Rows: The autodoc_pd_max_rows configuration controls the number of rows shown for the partial dependence plots (PDP) and Shapley values summary plot in the AutoDoc. Random sampling is used for datasets with more than the autodoc_pd_max_rows limit." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_pd_max_runtime do? 
: autodoc pd max runtime config.toml: Maximum number of seconds Partial Dependency computation can take when generating report. Set to -1 for no time limit." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_pd_max_runtime. : autodoc pd max runtime config.toml: Maximum number of seconds Partial Dependency computation can take when generating report. Set to -1 for no time limit." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: PDP max runtime in seconds: . : Set the autodoc pd max runtime config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_pd_max_runtime", + "output": "autodoc pd max runtime config.toml: Maximum number of seconds Partial Dependency computation can take when generating report. Set to -1 for no time limit." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_pd_max_runtime", + "output": "autodoc pd max runtime config.toml: PDP max runtime in seconds: Maximum number of seconds Partial Dependency computation can take when generating report. Set to -1 for no time limit." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc pd max runtime", + "output": "autodoc pd max runtime config.toml: PDP max runtime in seconds: Maximum number of seconds Partial Dependency computation can take when generating report. Set to -1 for no time limit." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "PDP max runtime in seconds: ", + "output": "autodoc pd max runtime config.toml: PDP max runtime in seconds: Maximum number of seconds Partial Dependency computation can take when generating report. Set to -1 for no time limit." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_pd_max_runtime", + "output": "autodoc pd max runtime config.toml: Maximum number of seconds Partial Dependency computation can take when generating report. Set to -1 for no time limit." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_pd_max_runtime", + "output": "autodoc pd max runtime config.toml: PDP max runtime in seconds: Maximum number of seconds Partial Dependency computation can take when generating report. Set to -1 for no time limit." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_pd_fast_approx do? : autodoc pd fast approx config.toml: Whether to enable fast approximation for predictions that are needed for the generation of partial dependence plots. Can help when want to create many PDP plots in short time. Amount of approximation is controlled by fast_approx_num_trees, fast_approx_do_one_fold, fast_approx_do_one_model experiment expert settings. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_pd_fast_approx. : autodoc pd fast approx config.toml: Whether to enable fast approximation for predictions that are needed for the generation of partial dependence plots. Can help when want to create many PDP plots in short time. Amount of approximation is controlled by fast_approx_num_trees, fast_approx_do_one_fold, fast_approx_do_one_model experiment expert settings. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Use fast approximation for PDP: . : Set the autodoc pd fast approx config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_pd_fast_approx", + "output": "autodoc pd fast approx config.toml: Whether to enable fast approximation for predictions that are needed for the generation of partial dependence plots. 
Can help when want to create many PDP plots in short time. Amount of approximation is controlled by fast_approx_num_trees, fast_approx_do_one_fold, fast_approx_do_one_model experiment expert settings. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_pd_fast_approx", + "output": "autodoc pd fast approx config.toml: Use fast approximation for PDP: Whether to enable fast approximation for predictions that are needed for the generation of partial dependence plots. Can help when want to create many PDP plots in short time. Amount of approximation is controlled by fast_approx_num_trees, fast_approx_do_one_fold, fast_approx_do_one_model experiment expert settings. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc pd fast approx", + "output": "autodoc pd fast approx config.toml: Use fast approximation for PDP: Whether to enable fast approximation for predictions that are needed for the generation of partial dependence plots. Can help when want to create many PDP plots in short time. Amount of approximation is controlled by fast_approx_num_trees, fast_approx_do_one_fold, fast_approx_do_one_model experiment expert settings. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Use fast approximation for PDP: ", + "output": "autodoc pd fast approx config.toml: Use fast approximation for PDP: Whether to enable fast approximation for predictions that are needed for the generation of partial dependence plots. Can help when want to create many PDP plots in short time. Amount of approximation is controlled by fast_approx_num_trees, fast_approx_do_one_fold, fast_approx_do_one_model experiment expert settings. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_pd_fast_approx", + "output": "autodoc pd fast approx config.toml: Whether to enable fast approximation for predictions that are needed for the generation of partial dependence plots. Can help when want to create many PDP plots in short time. Amount of approximation is controlled by fast_approx_num_trees, fast_approx_do_one_fold, fast_approx_do_one_model experiment expert settings. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_pd_fast_approx", + "output": "autodoc pd fast approx config.toml: Use fast approximation for PDP: Whether to enable fast approximation for predictions that are needed for the generation of partial dependence plots. Can help when want to create many PDP plots in short time. Amount of approximation is controlled by fast_approx_num_trees, fast_approx_do_one_fold, fast_approx_do_one_model experiment expert settings. " + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_pd_max_int_as_cat_uniques do? : autodoc pd max int as cat uniques config.toml: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only) Similar to max_int_as_cat_uniques used for experiment, but here used to control PDP making." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_pd_max_int_as_cat_uniques. : autodoc pd max int as cat uniques config.toml: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only) Similar to max_int_as_cat_uniques used for experiment, but here used to control PDP making." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: PDP Max. number of unique values for int/float to be categoricals: . 
: Set the autodoc pd max int as cat uniques config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_pd_max_int_as_cat_uniques", + "output": "autodoc pd max int as cat uniques config.toml: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only) Similar to max_int_as_cat_uniques used for experiment, but here used to control PDP making." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_pd_max_int_as_cat_uniques", + "output": "autodoc pd max int as cat uniques config.toml: PDP Max. number of unique values for int/float to be categoricals: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only) Similar to max_int_as_cat_uniques used for experiment, but here used to control PDP making." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc pd max int as cat uniques", + "output": "autodoc pd max int as cat uniques config.toml: PDP Max. number of unique values for int/float to be categoricals: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only) Similar to max_int_as_cat_uniques used for experiment, but here used to control PDP making." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "PDP Max. number of unique values for int/float to be categoricals: ", + "output": "autodoc pd max int as cat uniques config.toml: PDP Max. 
number of unique values for int/float to be categoricals: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only) Similar to max_int_as_cat_uniques used for experiment, but here used to control PDP making." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_pd_max_int_as_cat_uniques", + "output": "autodoc pd max int as cat uniques config.toml: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only) Similar to max_int_as_cat_uniques used for experiment, but here used to control PDP making." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_pd_max_int_as_cat_uniques", + "output": "autodoc pd max int as cat uniques config.toml: PDP Max. number of unique values for int/float to be categoricals: Max number of unique values for integer/real columns to be treated as categoricals (test applies to first statistical_threshold_data_size_small rows only) Similar to max_int_as_cat_uniques used for experiment, but here used to control PDP making." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_out_of_range do? : autodoc out of range config.toml: Number of standard deviations outside of the range of a column to include in partial dependence plots. This shows how the model will react to data it has not seen before." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_out_of_range. : autodoc out of range config.toml: Number of standard deviations outside of the range of a column to include in partial dependence plots. This shows how the model will react to data it has not seen before." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: PDP Out of Range: . 
: Set the autodoc out of range config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_out_of_range", + "output": "autodoc out of range config.toml: Number of standard deviations outside of the range of a column to include in partial dependence plots. This shows how the model will react to data it has not seen before." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_out_of_range", + "output": "autodoc out of range config.toml: PDP Out of Range: Number of standard deviations outside of the range of a column to include in partial dependence plots. This shows how the model will react to data it has not seen before." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc out of range", + "output": "autodoc out of range config.toml: PDP Out of Range: Number of standard deviations outside of the range of a column to include in partial dependence plots. This shows how the model will react to data it has not seen before." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "PDP Out of Range: ", + "output": "autodoc out of range config.toml: PDP Out of Range: Number of standard deviations outside of the range of a column to include in partial dependence plots. This shows how the model will react to data it has not seen before." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_out_of_range", + "output": "autodoc out of range config.toml: Number of standard deviations outside of the range of a column to include in partial dependence plots. This shows how the model will react to data it has not seen before." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_out_of_range", + "output": "autodoc out of range config.toml: PDP Out of Range: Number of standard deviations outside of the range of a column to include in partial dependence plots. This shows how the model will react to data it has not seen before." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_num_rows do? : autodoc num rows config.toml: Specify the number of rows to include in PDP and ICE plot if individual rows are not specified." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_num_rows. : autodoc num rows config.toml: Specify the number of rows to include in PDP and ICE plot if individual rows are not specified." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: ICE Number of Rows: . : Set the autodoc num rows config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_num_rows", + "output": "autodoc num rows config.toml: Specify the number of rows to include in PDP and ICE plot if individual rows are not specified." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_num_rows", + "output": "autodoc num rows config.toml: ICE Number of Rows: Specify the number of rows to include in PDP and ICE plot if individual rows are not specified." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc num rows", + "output": "autodoc num rows config.toml: ICE Number of Rows: Specify the number of rows to include in PDP and ICE plot if individual rows are not specified." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ICE Number of Rows: ", + "output": "autodoc num rows config.toml: ICE Number of Rows: Specify the number of rows to include in PDP and ICE plot if individual rows are not specified." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_num_rows", + "output": "autodoc num rows config.toml: Specify the number of rows to include in PDP and ICE plot if individual rows are not specified." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_num_rows", + "output": "autodoc num rows config.toml: ICE Number of Rows: Specify the number of rows to include in PDP and ICE plot if individual rows are not specified." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_population_stability_index do? : autodoc population stability index config.toml: Whether to include population stability index if experiment is binary classification/regression." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_population_stability_index. : autodoc population stability index config.toml: Whether to include population stability index if experiment is binary classification/regression." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Population Stability Index: . : Set the autodoc population stability index config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_population_stability_index", + "output": "autodoc population stability index config.toml: Whether to include population stability index if experiment is binary classification/regression." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_population_stability_index", + "output": "autodoc population stability index config.toml: Population Stability Index: Whether to include population stability index if experiment is binary classification/regression." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc population stability index", + "output": "autodoc population stability index config.toml: Population Stability Index: Whether to include population stability index if experiment is binary classification/regression." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Population Stability Index: ", + "output": "autodoc population stability index config.toml: Population Stability Index: Whether to include population stability index if experiment is binary classification/regression." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_population_stability_index", + "output": "autodoc population stability index config.toml: Whether to include population stability index if experiment is binary classification/regression." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_population_stability_index", + "output": "autodoc population stability index config.toml: Population Stability Index: Whether to include population stability index if experiment is binary classification/regression." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_population_stability_index_n_quantiles do? : autodoc population stability index n quantiles config.toml: Number of quantiles to use for population stability index ." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_population_stability_index_n_quantiles. : autodoc population stability index n quantiles config.toml: Number of quantiles to use for population stability index ." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Population Stability Index Number of Quantiles: . : Set the autodoc population stability index n quantiles config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_population_stability_index_n_quantiles", + "output": "autodoc population stability index n quantiles config.toml: Number of quantiles to use for population stability index ." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_population_stability_index_n_quantiles", + "output": "autodoc population stability index n quantiles config.toml: Population Stability Index Number of Quantiles: Number of quantiles to use for population stability index ." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc population stability index n quantiles", + "output": "autodoc population stability index n quantiles config.toml: Population Stability Index Number of Quantiles: Number of quantiles to use for population stability index ." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Population Stability Index Number of Quantiles: ", + "output": "autodoc population stability index n quantiles config.toml: Population Stability Index Number of Quantiles: Number of quantiles to use for population stability index ." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_population_stability_index_n_quantiles", + "output": "autodoc population stability index n quantiles config.toml: Number of quantiles to use for population stability index ." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_population_stability_index_n_quantiles", + "output": "autodoc population stability index n quantiles config.toml: Population Stability Index Number of Quantiles: Number of quantiles to use for population stability index ." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_prediction_stats do? : autodoc prediction stats config.toml: Whether to include prediction statistics information if experiment is binary classification/regression." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_prediction_stats. : autodoc prediction stats config.toml: Whether to include prediction statistics information if experiment is binary classification/regression." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Prediction Statistics: . : Set the autodoc prediction stats config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_prediction_stats", + "output": "autodoc prediction stats config.toml: Whether to include prediction statistics information if experiment is binary classification/regression." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_prediction_stats", + "output": "autodoc prediction stats config.toml: Prediction Statistics: Whether to include prediction statistics information if experiment is binary classification/regression." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc prediction stats", + "output": "autodoc prediction stats config.toml: Prediction Statistics: Whether to include prediction statistics information if experiment is binary classification/regression." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Prediction Statistics: ", + "output": "autodoc prediction stats config.toml: Prediction Statistics: Whether to include prediction statistics information if experiment is binary classification/regression." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_prediction_stats", + "output": "autodoc prediction stats config.toml: Whether to include prediction statistics information if experiment is binary classification/regression." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_prediction_stats", + "output": "autodoc prediction stats config.toml: Prediction Statistics: Whether to include prediction statistics information if experiment is binary classification/regression." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_prediction_stats_n_quantiles do? : autodoc prediction stats n quantiles config.toml: Number of quantiles to use for prediction statistics." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_prediction_stats_n_quantiles. : autodoc prediction stats n quantiles config.toml: Number of quantiles to use for prediction statistics." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Prediction Statistics Number of Quantiles: . 
: Set the autodoc prediction stats n quantiles config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_prediction_stats_n_quantiles", + "output": "autodoc prediction stats n quantiles config.toml: Number of quantiles to use for prediction statistics." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_prediction_stats_n_quantiles", + "output": "autodoc prediction stats n quantiles config.toml: Prediction Statistics Number of Quantiles: Number of quantiles to use for prediction statistics." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc prediction stats n quantiles", + "output": "autodoc prediction stats n quantiles config.toml: Prediction Statistics Number of Quantiles: Number of quantiles to use for prediction statistics." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Prediction Statistics Number of Quantiles: ", + "output": "autodoc prediction stats n quantiles config.toml: Prediction Statistics Number of Quantiles: Number of quantiles to use for prediction statistics." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_prediction_stats_n_quantiles", + "output": "autodoc prediction stats n quantiles config.toml: Number of quantiles to use for prediction statistics." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_prediction_stats_n_quantiles", + "output": "autodoc prediction stats n quantiles config.toml: Prediction Statistics Number of Quantiles: Number of quantiles to use for prediction statistics." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_response_rate do? 
: autodoc response rate config.toml: Whether to include response rates information if experiment is binary classification." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_response_rate. : autodoc response rate config.toml: Whether to include response rates information if experiment is binary classification." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Response Rates Plot: . : Set the autodoc response rate config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_response_rate", + "output": "autodoc response rate config.toml: Whether to include response rates information if experiment is binary classification." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_response_rate", + "output": "autodoc response rate config.toml: Response Rates Plot: Whether to include response rates information if experiment is binary classification." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc response rate", + "output": "autodoc response rate config.toml: Response Rates Plot: Whether to include response rates information if experiment is binary classification." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Response Rates Plot: ", + "output": "autodoc response rate config.toml: Response Rates Plot: Whether to include response rates information if experiment is binary classification." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_response_rate", + "output": "autodoc response rate config.toml: Whether to include response rates information if experiment is binary classification." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_response_rate", + "output": "autodoc response rate config.toml: Response Rates Plot: Whether to include response rates information if experiment is binary classification." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_response_rate_n_quantiles do? : autodoc response rate n quantiles config.toml: Number of quantiles to use for response rates information ." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_response_rate_n_quantiles. : autodoc response rate n quantiles config.toml: Number of quantiles to use for response rates information ." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Response Rate Plot Number of Quantiles: . : Set the autodoc response rate n quantiles config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_response_rate_n_quantiles", + "output": "autodoc response rate n quantiles config.toml: Number of quantiles to use for response rates information ." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_response_rate_n_quantiles", + "output": "autodoc response rate n quantiles config.toml: Response Rate Plot Number of Quantiles: Number of quantiles to use for response rates information ." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc response rate n quantiles", + "output": "autodoc response rate n quantiles config.toml: Response Rate Plot Number of Quantiles: Number of quantiles to use for response rates information ." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Response Rate Plot Number of Quantiles: ", + "output": "autodoc response rate n quantiles config.toml: Response Rate Plot Number of Quantiles: Number of quantiles to use for response rates information ." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_response_rate_n_quantiles", + "output": "autodoc response rate n quantiles config.toml: Number of quantiles to use for response rates information ." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_response_rate_n_quantiles", + "output": "autodoc response rate n quantiles config.toml: Response Rate Plot Number of Quantiles: Number of quantiles to use for response rates information ." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_gini_plot do? : autodoc gini plot config.toml: Whether to show the Gini Plot." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_gini_plot. : autodoc gini plot config.toml: Whether to show the Gini Plot." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Show GINI Plot: . : Set the autodoc gini plot config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_gini_plot", + "output": "autodoc gini plot config.toml: Whether to show the Gini Plot." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_gini_plot", + "output": "autodoc gini plot config.toml: Show GINI Plot: Whether to show the Gini Plot." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc gini plot", + "output": "autodoc gini plot config.toml: Show GINI Plot: Whether to show the Gini Plot." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Show GINI Plot: ", + "output": "autodoc gini plot config.toml: Show GINI Plot: Whether to show the Gini Plot." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_gini_plot", + "output": "autodoc gini plot config.toml: Whether to show the Gini Plot." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_gini_plot", + "output": "autodoc gini plot config.toml: Show GINI Plot: Whether to show the Gini Plot." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_enable_shapley_values do? : autodoc enable shapley values config.toml: Show Shapley values results in the AutoDoc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_enable_shapley_values. : autodoc enable shapley values config.toml: Show Shapley values results in the AutoDoc." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable Shapley Values: . : Set the autodoc enable shapley values config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_enable_shapley_values", + "output": "autodoc enable shapley values config.toml: Show Shapley values results in the AutoDoc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_enable_shapley_values", + "output": "autodoc enable shapley values config.toml: Enable Shapley Values: Show Shapley values results in the AutoDoc." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc enable shapley values", + "output": "autodoc enable shapley values config.toml: Enable Shapley Values: Show Shapley values results in the AutoDoc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable Shapley Values: ", + "output": "autodoc enable shapley values config.toml: Enable Shapley Values: Show Shapley values results in the AutoDoc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_enable_shapley_values", + "output": "autodoc enable shapley values config.toml: Show Shapley values results in the AutoDoc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_enable_shapley_values", + "output": "autodoc enable shapley values config.toml: Enable Shapley Values: Show Shapley values results in the AutoDoc." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_global_klime_num_features do? : autodoc global klime num features config.toml: The number feature in a KLIME global GLM coefficients table. Must be an integer greater than 0 or -1. To show all features set to -1." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_global_klime_num_features. : autodoc global klime num features config.toml: The number feature in a KLIME global GLM coefficients table. Must be an integer greater than 0 or -1. To show all features set to -1." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Global KLIME Number of Features: . 
: Set the autodoc global klime num features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_global_klime_num_features", + "output": "autodoc global klime num features config.toml: The number feature in a KLIME global GLM coefficients table. Must be an integer greater than 0 or -1. To show all features set to -1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_global_klime_num_features", + "output": "autodoc global klime num features config.toml: Global KLIME Number of Features: The number feature in a KLIME global GLM coefficients table. Must be an integer greater than 0 or -1. To show all features set to -1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc global klime num features", + "output": "autodoc global klime num features config.toml: Global KLIME Number of Features: The number feature in a KLIME global GLM coefficients table. Must be an integer greater than 0 or -1. To show all features set to -1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Global KLIME Number of Features: ", + "output": "autodoc global klime num features config.toml: Global KLIME Number of Features: The number feature in a KLIME global GLM coefficients table. Must be an integer greater than 0 or -1. To show all features set to -1." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_global_klime_num_features", + "output": "autodoc global klime num features config.toml: The number feature in a KLIME global GLM coefficients table. Must be an integer greater than 0 or -1. To show all features set to -1." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_global_klime_num_features", + "output": "autodoc global klime num features config.toml: Global KLIME Number of Features: The number feature in a KLIME global GLM coefficients table. Must be an integer greater than 0 or -1. To show all features set to -1." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_global_klime_num_tables do? : autodoc global klime num tables config.toml: Set the number of KLIME global GLM coefficients tables. Set to 1 to show one table with coefficients sorted by absolute value. Set to 2 to two tables one with the top positive coefficients and one with the top negative coefficients." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_global_klime_num_tables. : autodoc global klime num tables config.toml: Set the number of KLIME global GLM coefficients tables. Set to 1 to show one table with coefficients sorted by absolute value. Set to 2 to two tables one with the top positive coefficients and one with the top negative coefficients." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Global KLIME Number of Tables: . : Set the autodoc global klime num tables config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_global_klime_num_tables", + "output": "autodoc global klime num tables config.toml: Set the number of KLIME global GLM coefficients tables. Set to 1 to show one table with coefficients sorted by absolute value. Set to 2 to two tables one with the top positive coefficients and one with the top negative coefficients." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_global_klime_num_tables", + "output": "autodoc global klime num tables config.toml: Global KLIME Number of Tables: Set the number of KLIME global GLM coefficients tables. Set to 1 to show one table with coefficients sorted by absolute value. Set to 2 to two tables one with the top positive coefficients and one with the top negative coefficients." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc global klime num tables", + "output": "autodoc global klime num tables config.toml: Global KLIME Number of Tables: Set the number of KLIME global GLM coefficients tables. Set to 1 to show one table with coefficients sorted by absolute value. Set to 2 to two tables one with the top positive coefficients and one with the top negative coefficients." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Global KLIME Number of Tables: ", + "output": "autodoc global klime num tables config.toml: Global KLIME Number of Tables: Set the number of KLIME global GLM coefficients tables. Set to 1 to show one table with coefficients sorted by absolute value. Set to 2 to two tables one with the top positive coefficients and one with the top negative coefficients." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_global_klime_num_tables", + "output": "autodoc global klime num tables config.toml: Set the number of KLIME global GLM coefficients tables. Set to 1 to show one table with coefficients sorted by absolute value. Set to 2 to two tables one with the top positive coefficients and one with the top negative coefficients." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_global_klime_num_tables", + "output": "autodoc global klime num tables config.toml: Global KLIME Number of Tables: Set the number of KLIME global GLM coefficients tables. Set to 1 to show one table with coefficients sorted by absolute value. Set to 2 to two tables one with the top positive coefficients and one with the top negative coefficients." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_data_summary_col_num do? : autodoc data summary col num config.toml: Number of features to be show in data summary. Value must be an integer. Values lower than 1, f.e. 0 or -1, indicate that all columns should be shown." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_data_summary_col_num. : autodoc data summary col num config.toml: Number of features to be show in data summary. Value must be an integer. Values lower than 1, f.e. 0 or -1, indicate that all columns should be shown." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of Features in Data Summary Table: . : Set the autodoc data summary col num config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_data_summary_col_num", + "output": "autodoc data summary col num config.toml: Number of features to be show in data summary. Value must be an integer. Values lower than 1, f.e. 0 or -1, indicate that all columns should be shown." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_data_summary_col_num", + "output": "autodoc data summary col num config.toml: Number of Features in Data Summary Table: Number of features to be show in data summary. Value must be an integer. Values lower than 1, f.e. 0 or -1, indicate that all columns should be shown." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc data summary col num", + "output": "autodoc data summary col num config.toml: Number of Features in Data Summary Table: Number of features to be show in data summary. Value must be an integer. Values lower than 1, f.e. 0 or -1, indicate that all columns should be shown." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of Features in Data Summary Table: ", + "output": "autodoc data summary col num config.toml: Number of Features in Data Summary Table: Number of features to be show in data summary. Value must be an integer. Values lower than 1, f.e. 0 or -1, indicate that all columns should be shown." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_data_summary_col_num", + "output": "autodoc data summary col num config.toml: Number of features to be show in data summary. Value must be an integer. Values lower than 1, f.e. 0 or -1, indicate that all columns should be shown." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_data_summary_col_num", + "output": "autodoc data summary col num config.toml: Number of Features in Data Summary Table: Number of features to be show in data summary. Value must be an integer. Values lower than 1, f.e. 0 or -1, indicate that all columns should be shown." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_list_all_config_settings do? : autodoc list all config settings config.toml: Whether to show all config settings. If False, only the changed settings (config overrides) are listed, otherwise all settings are listed." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_list_all_config_settings. 
: autodoc list all config settings config.toml: Whether to show all config settings. If False, only the changed settings (config overrides) are listed, otherwise all settings are listed." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: List All Config Settings: . : Set the autodoc list all config settings config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_list_all_config_settings", + "output": "autodoc list all config settings config.toml: Whether to show all config settings. If False, only the changed settings (config overrides) are listed, otherwise all settings are listed." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_list_all_config_settings", + "output": "autodoc list all config settings config.toml: List All Config Settings: Whether to show all config settings. If False, only the changed settings (config overrides) are listed, otherwise all settings are listed." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc list all config settings", + "output": "autodoc list all config settings config.toml: List All Config Settings: Whether to show all config settings. If False, only the changed settings (config overrides) are listed, otherwise all settings are listed." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "List All Config Settings: ", + "output": "autodoc list all config settings config.toml: List All Config Settings: Whether to show all config settings. If False, only the changed settings (config overrides) are listed, otherwise all settings are listed." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_list_all_config_settings", + "output": "autodoc list all config settings config.toml: Whether to show all config settings. If False, only the changed settings (config overrides) are listed, otherwise all settings are listed." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_list_all_config_settings", + "output": "autodoc list all config settings config.toml: List All Config Settings: Whether to show all config settings. If False, only the changed settings (config overrides) are listed, otherwise all settings are listed." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_keras_summary_line_length do? : autodoc keras summary line length config.toml: Line length of the keras model architecture summary. Must be an integer greater than 0 or -1. To use the default line length set value -1." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_keras_summary_line_length. : autodoc keras summary line length config.toml: Line length of the keras model architecture summary. Must be an integer greater than 0 or -1. To use the default line length set value -1." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Keras Model Architecture Summary Line Length: . : Set the autodoc keras summary line length config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_keras_summary_line_length", + "output": "autodoc keras summary line length config.toml: Line length of the keras model architecture summary. Must be an integer greater than 0 or -1. To use the default line length set value -1." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_keras_summary_line_length", + "output": "autodoc keras summary line length config.toml: Keras Model Architecture Summary Line Length: Line length of the keras model architecture summary. Must be an integer greater than 0 or -1. To use the default line length set value -1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc keras summary line length", + "output": "autodoc keras summary line length config.toml: Keras Model Architecture Summary Line Length: Line length of the keras model architecture summary. Must be an integer greater than 0 or -1. To use the default line length set value -1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Keras Model Architecture Summary Line Length: ", + "output": "autodoc keras summary line length config.toml: Keras Model Architecture Summary Line Length: Line length of the keras model architecture summary. Must be an integer greater than 0 or -1. To use the default line length set value -1." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_keras_summary_line_length", + "output": "autodoc keras summary line length config.toml: Line length of the keras model architecture summary. Must be an integer greater than 0 or -1. To use the default line length set value -1." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_keras_summary_line_length", + "output": "autodoc keras summary line length config.toml: Keras Model Architecture Summary Line Length: Line length of the keras model architecture summary. Must be an integer greater than 0 or -1. To use the default line length set value -1." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_transformer_architecture_max_lines do? : autodoc transformer architecture max lines config.toml: Maximum number of lines shown for advanced transformer architecture in the Feature section. Note that the full architecture can be found in the Appendix." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_transformer_architecture_max_lines. : autodoc transformer architecture max lines config.toml: Maximum number of lines shown for advanced transformer architecture in the Feature section. Note that the full architecture can be found in the Appendix." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: NLP/Image Transformer Architecture Max Lines: . : Set the autodoc transformer architecture max lines config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_transformer_architecture_max_lines", + "output": "autodoc transformer architecture max lines config.toml: Maximum number of lines shown for advanced transformer architecture in the Feature section. Note that the full architecture can be found in the Appendix." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_transformer_architecture_max_lines", + "output": "autodoc transformer architecture max lines config.toml: NLP/Image Transformer Architecture Max Lines: Maximum number of lines shown for advanced transformer architecture in the Feature section. Note that the full architecture can be found in the Appendix." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc transformer architecture max lines", + "output": "autodoc transformer architecture max lines config.toml: NLP/Image Transformer Architecture Max Lines: Maximum number of lines shown for advanced transformer architecture in the Feature section. Note that the full architecture can be found in the Appendix." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "NLP/Image Transformer Architecture Max Lines: ", + "output": "autodoc transformer architecture max lines config.toml: NLP/Image Transformer Architecture Max Lines: Maximum number of lines shown for advanced transformer architecture in the Feature section. Note that the full architecture can be found in the Appendix." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_transformer_architecture_max_lines", + "output": "autodoc transformer architecture max lines config.toml: Maximum number of lines shown for advanced transformer architecture in the Feature section. Note that the full architecture can be found in the Appendix." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_transformer_architecture_max_lines", + "output": "autodoc transformer architecture max lines config.toml: NLP/Image Transformer Architecture Max Lines: Maximum number of lines shown for advanced transformer architecture in the Feature section. Note that the full architecture can be found in the Appendix." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_full_architecture_in_appendix do? : autodoc full architecture in appendix config.toml: Show full NLP/Image transformer architecture in the Appendix." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_full_architecture_in_appendix. : autodoc full architecture in appendix config.toml: Show full NLP/Image transformer architecture in the Appendix." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Appendix NLP/Image Transformer Architecture: . : Set the autodoc full architecture in appendix config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_full_architecture_in_appendix", + "output": "autodoc full architecture in appendix config.toml: Show full NLP/Image transformer architecture in the Appendix." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_full_architecture_in_appendix", + "output": "autodoc full architecture in appendix config.toml: Appendix NLP/Image Transformer Architecture: Show full NLP/Image transformer architecture in the Appendix." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc full architecture in appendix", + "output": "autodoc full architecture in appendix config.toml: Appendix NLP/Image Transformer Architecture: Show full NLP/Image transformer architecture in the Appendix." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Appendix NLP/Image Transformer Architecture: ", + "output": "autodoc full architecture in appendix config.toml: Appendix NLP/Image Transformer Architecture: Show full NLP/Image transformer architecture in the Appendix." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_full_architecture_in_appendix", + "output": "autodoc full architecture in appendix config.toml: Show full NLP/Image transformer architecture in the Appendix." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_full_architecture_in_appendix", + "output": "autodoc full architecture in appendix config.toml: Appendix NLP/Image Transformer Architecture: Show full NLP/Image transformer architecture in the Appendix." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_coef_table_appendix_results_table do? : autodoc coef table appendix results table config.toml: Specify whether to show the full glm coefficient table(s) in the appendix. coef_table_appendix_results_table must be a boolean: True to show tables in appendix, False to not show them ." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_coef_table_appendix_results_table. : autodoc coef table appendix results table config.toml: Specify whether to show the full glm coefficient table(s) in the appendix. coef_table_appendix_results_table must be a boolean: True to show tables in appendix, False to not show them ." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Full GLM Coefficients Table in the Appendix: . : Set the autodoc coef table appendix results table config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_coef_table_appendix_results_table", + "output": "autodoc coef table appendix results table config.toml: Specify whether to show the full glm coefficient table(s) in the appendix. coef_table_appendix_results_table must be a boolean: True to show tables in appendix, False to not show them ." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_coef_table_appendix_results_table", + "output": "autodoc coef table appendix results table config.toml: Full GLM Coefficients Table in the Appendix: Specify whether to show the full glm coefficient table(s) in the appendix. 
coef_table_appendix_results_table must be a boolean: True to show tables in appendix, False to not show them ." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc coef table appendix results table", + "output": "autodoc coef table appendix results table config.toml: Full GLM Coefficients Table in the Appendix: Specify whether to show the full glm coefficient table(s) in the appendix. coef_table_appendix_results_table must be a boolean: True to show tables in appendix, False to not show them ." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Full GLM Coefficients Table in the Appendix: ", + "output": "autodoc coef table appendix results table config.toml: Full GLM Coefficients Table in the Appendix: Specify whether to show the full glm coefficient table(s) in the appendix. coef_table_appendix_results_table must be a boolean: True to show tables in appendix, False to not show them ." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_coef_table_appendix_results_table", + "output": "autodoc coef table appendix results table config.toml: Specify whether to show the full glm coefficient table(s) in the appendix. coef_table_appendix_results_table must be a boolean: True to show tables in appendix, False to not show them ." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_coef_table_appendix_results_table", + "output": "autodoc coef table appendix results table config.toml: Full GLM Coefficients Table in the Appendix: Specify whether to show the full glm coefficient table(s) in the appendix. coef_table_appendix_results_table must be a boolean: True to show tables in appendix, False to not show them ." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_coef_table_num_models do? : autodoc coef table num models config.toml: Set the number of models for which a glm coefficients table is shown in the AutoDoc. coef_table_num_models must be -1 or an integer >= 1 (-1 shows all models)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_coef_table_num_models. : autodoc coef table num models config.toml: Set the number of models for which a glm coefficients table is shown in the AutoDoc. coef_table_num_models must be -1 or an integer >= 1 (-1 shows all models)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: GLM Coefficient Tables Number of Models: . : Set the autodoc coef table num models config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_coef_table_num_models", + "output": "autodoc coef table num models config.toml: Set the number of models for which a glm coefficients table is shown in the AutoDoc. coef_table_num_models must be -1 or an integer >= 1 (-1 shows all models)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_coef_table_num_models", + "output": "autodoc coef table num models config.toml: GLM Coefficient Tables Number of Models: Set the number of models for which a glm coefficients table is shown in the AutoDoc. coef_table_num_models must be -1 or an integer >= 1 (-1 shows all models)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc coef table num models", + "output": "autodoc coef table num models config.toml: GLM Coefficient Tables Number of Models: Set the number of models for which a glm coefficients table is shown in the AutoDoc. coef_table_num_models must be -1 or an integer >= 1 (-1 shows all models)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "GLM Coefficient Tables Number of Models: ", + "output": "autodoc coef table num models config.toml: GLM Coefficient Tables Number of Models: Set the number of models for which a glm coefficients table is shown in the AutoDoc. coef_table_num_models must be -1 or an integer >= 1 (-1 shows all models)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_coef_table_num_models", + "output": "autodoc coef table num models config.toml: Set the number of models for which a glm coefficients table is shown in the AutoDoc. coef_table_num_models must be -1 or an integer >= 1 (-1 shows all models)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_coef_table_num_models", + "output": "autodoc coef table num models config.toml: GLM Coefficient Tables Number of Models: Set the number of models for which a glm coefficients table is shown in the AutoDoc. coef_table_num_models must be -1 or an integer >= 1 (-1 shows all models)." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_coef_table_num_folds do? : autodoc coef table num folds config.toml: Set the number of folds per model for which a glm coefficients table is shown in the AutoDoc. coef_table_num_folds must be -1 or an integer >= 1 (-1 shows all folds per model)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_coef_table_num_folds. : autodoc coef table num folds config.toml: Set the number of folds per model for which a glm coefficients table is shown in the AutoDoc. coef_table_num_folds must be -1 or an integer >= 1 (-1 shows all folds per model)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: GLM Coefficient Tables Number of Folds Per Model: . 
: Set the autodoc coef table num folds config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_coef_table_num_folds", + "output": "autodoc coef table num folds config.toml: Set the number of folds per model for which a glm coefficients table is shown in the AutoDoc. coef_table_num_folds must be -1 or an integer >= 1 (-1 shows all folds per model)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_coef_table_num_folds", + "output": "autodoc coef table num folds config.toml: GLM Coefficient Tables Number of Folds Per Model: Set the number of folds per model for which a glm coefficients table is shown in the AutoDoc. coef_table_num_folds must be -1 or an integer >= 1 (-1 shows all folds per model)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc coef table num folds", + "output": "autodoc coef table num folds config.toml: GLM Coefficient Tables Number of Folds Per Model: Set the number of folds per model for which a glm coefficients table is shown in the AutoDoc. coef_table_num_folds must be -1 or an integer >= 1 (-1 shows all folds per model)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "GLM Coefficient Tables Number of Folds Per Model: ", + "output": "autodoc coef table num folds config.toml: GLM Coefficient Tables Number of Folds Per Model: Set the number of folds per model for which a glm coefficients table is shown in the AutoDoc. coef_table_num_folds must be -1 or an integer >= 1 (-1 shows all folds per model)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_coef_table_num_folds", + "output": "autodoc coef table num folds config.toml: Set the number of folds per model for which a glm coefficients table is shown in the AutoDoc. coef_table_num_folds must be -1 or an integer >= 1 (-1 shows all folds per model)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_coef_table_num_folds", + "output": "autodoc coef table num folds config.toml: GLM Coefficient Tables Number of Folds Per Model: Set the number of folds per model for which a glm coefficients table is shown in the AutoDoc. coef_table_num_folds must be -1 or an integer >= 1 (-1 shows all folds per model)." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_coef_table_num_coef do? : autodoc coef table num coef config.toml: Set the number of coefficients to show within a glm coefficients table in the AutoDoc. coef_table_num_coef, controls the number of rows shown in a glm table and must be -1 or an integer >= 1 (-1 shows all coefficients)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_coef_table_num_coef. : autodoc coef table num coef config.toml: Set the number of coefficients to show within a glm coefficients table in the AutoDoc. coef_table_num_coef, controls the number of rows shown in a glm table and must be -1 or an integer >= 1 (-1 shows all coefficients)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: GLM Coefficient Tables Number of Coefficients : . : Set the autodoc coef table num coef config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_coef_table_num_coef", + "output": "autodoc coef table num coef config.toml: Set the number of coefficients to show within a glm coefficients table in the AutoDoc. 
coef_table_num_coef, controls the number of rows shown in a glm table and must be -1 or an integer >= 1 (-1 shows all coefficients)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_coef_table_num_coef", + "output": "autodoc coef table num coef config.toml: GLM Coefficient Tables Number of Coefficients : Set the number of coefficients to show within a glm coefficients table in the AutoDoc. coef_table_num_coef, controls the number of rows shown in a glm table and must be -1 or an integer >= 1 (-1 shows all coefficients)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc coef table num coef", + "output": "autodoc coef table num coef config.toml: GLM Coefficient Tables Number of Coefficients : Set the number of coefficients to show within a glm coefficients table in the AutoDoc. coef_table_num_coef, controls the number of rows shown in a glm table and must be -1 or an integer >= 1 (-1 shows all coefficients)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "GLM Coefficient Tables Number of Coefficients : ", + "output": "autodoc coef table num coef config.toml: GLM Coefficient Tables Number of Coefficients : Set the number of coefficients to show within a glm coefficients table in the AutoDoc. coef_table_num_coef, controls the number of rows shown in a glm table and must be -1 or an integer >= 1 (-1 shows all coefficients)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_coef_table_num_coef", + "output": "autodoc coef table num coef config.toml: Set the number of coefficients to show within a glm coefficients table in the AutoDoc. 
coef_table_num_coef, controls the number of rows shown in a glm table and must be -1 or an integer >= 1 (-1 shows all coefficients)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_coef_table_num_coef", + "output": "autodoc coef table num coef config.toml: GLM Coefficient Tables Number of Coefficients : Set the number of coefficients to show within a glm coefficients table in the AutoDoc. coef_table_num_coef, controls the number of rows shown in a glm table and must be -1 or an integer >= 1 (-1 shows all coefficients)." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_coef_table_num_classes do? : autodoc coef table num classes config.toml: Set the number of classes to show within a glm coefficients table in the AutoDoc. coef_table_num_classes controls the number of class-columns shown in a glm table and must be -1 or an integer >= 4 (-1 shows all classes)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_coef_table_num_classes. : autodoc coef table num classes config.toml: Set the number of classes to show within a glm coefficients table in the AutoDoc. coef_table_num_classes controls the number of class-columns shown in a glm table and must be -1 or an integer >= 4 (-1 shows all classes)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: GLM Coefficient Tables Number of Classes: . : Set the autodoc coef table num classes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_coef_table_num_classes", + "output": "autodoc coef table num classes config.toml: Set the number of classes to show within a glm coefficients table in the AutoDoc. coef_table_num_classes controls the number of class-columns shown in a glm table and must be -1 or an integer >= 4 (-1 shows all classes)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_coef_table_num_classes", + "output": "autodoc coef table num classes config.toml: GLM Coefficient Tables Number of Classes: Set the number of classes to show within a glm coefficients table in the AutoDoc. coef_table_num_classes controls the number of class-columns shown in a glm table and must be -1 or an integer >= 4 (-1 shows all classes)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc coef table num classes", + "output": "autodoc coef table num classes config.toml: GLM Coefficient Tables Number of Classes: Set the number of classes to show within a glm coefficients table in the AutoDoc. coef_table_num_classes controls the number of class-columns shown in a glm table and must be -1 or an integer >= 4 (-1 shows all classes)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "GLM Coefficient Tables Number of Classes: ", + "output": "autodoc coef table num classes config.toml: GLM Coefficient Tables Number of Classes: Set the number of classes to show within a glm coefficients table in the AutoDoc. coef_table_num_classes controls the number of class-columns shown in a glm table and must be -1 or an integer >= 4 (-1 shows all classes)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_coef_table_num_classes", + "output": "autodoc coef table num classes config.toml: Set the number of classes to show within a glm coefficients table in the AutoDoc. coef_table_num_classes controls the number of class-columns shown in a glm table and must be -1 or an integer >= 4 (-1 shows all classes)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_coef_table_num_classes", + "output": "autodoc coef table num classes config.toml: GLM Coefficient Tables Number of Classes: Set the number of classes to show within a glm coefficients table in the AutoDoc. coef_table_num_classes controls the number of class-columns shown in a glm table and must be -1 or an integer >= 4 (-1 shows all classes)." + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_num_histogram_plots do? : autodoc num histogram plots config.toml: When histogram plots are available: The number of top (default 10) features for which to show histograms." + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_num_histogram_plots. : autodoc num histogram plots config.toml: When histogram plots are available: The number of top (default 10) features for which to show histograms." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of Histograms to Show: . : Set the autodoc num histogram plots config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_num_histogram_plots", + "output": "autodoc num histogram plots config.toml: When histogram plots are available: The number of top (default 10) features for which to show histograms." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_num_histogram_plots", + "output": "autodoc num histogram plots config.toml: Number of Histograms to Show: When histogram plots are available: The number of top (default 10) features for which to show histograms." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc num histogram plots", + "output": "autodoc num histogram plots config.toml: Number of Histograms to Show: When histogram plots are available: The number of top (default 10) features for which to show histograms." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of Histograms to Show: ", + "output": "autodoc num histogram plots config.toml: Number of Histograms to Show: When histogram plots are available: The number of top (default 10) features for which to show histograms." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_num_histogram_plots", + "output": "autodoc num histogram plots config.toml: When histogram plots are available: The number of top (default 10) features for which to show histograms." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_num_histogram_plots", + "output": "autodoc num histogram plots config.toml: Number of Histograms to Show: When histogram plots are available: The number of top (default 10) features for which to show histograms." + }, + { + "prompt_type": "plain", + "instruction": ": What does pdp_max_threads do? : pdp max threads config.toml: Maximum number of threads/forks for autoreport PDP. -1 means auto.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain pdp_max_threads. : pdp max threads config.toml: Maximum number of threads/forks for autoreport PDP. -1 means auto.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pdp_max_threads", + "output": "pdp max threads config.toml: Maximum number of threads/forks for autoreport PDP. 
-1 means auto.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pdp_max_threads", + "output": "pdp max threads config.toml: Maximum number of threads/forks for autoreport PDP. -1 means auto.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pdp max threads", + "output": "pdp max threads config.toml: Maximum number of threads/forks for autoreport PDP. -1 means auto.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of threads/forks for autoreport PDP. -1 means auto.: ", + "output": "pdp max threads config.toml: Maximum number of threads/forks for autoreport PDP. -1 means auto.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pdp_max_threads", + "output": "pdp max threads config.toml: Maximum number of threads/forks for autoreport PDP. -1 means auto.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pdp_max_threads", + "output": "pdp max threads config.toml: Maximum number of threads/forks for autoreport PDP. -1 means auto.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does autodoc_force_singlenode do? : autodoc force singlenode config.toml: If True, will force AutoDoc to run in only the main server, not on remote workers in case of a multi-node setup" + }, + { + "prompt_type": "plain", + "instruction": ": Explain autodoc_force_singlenode. 
: autodoc force singlenode config.toml: If True, will force AutoDoc to run in only the main server, not on remote workers in case of a multi-node setup" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_force_singlenode", + "output": "autodoc force singlenode config.toml: If True, will force AutoDoc to run in only the main server, not on remote workers in case of a multi-node setup" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc_force_singlenode", + "output": "autodoc force singlenode config.toml: If True, will force AutoDoc to run in only the main server, not on remote workers in case of a multi-node setup" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autodoc force singlenode", + "output": "autodoc force singlenode config.toml: If True, will force AutoDoc to run in only the main server, not on remote workers in case of a multi-node setup" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "autodoc force singlenode config.toml: If True, will force AutoDoc to run in only the main server, not on remote workers in case of a multi-node setup" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autodoc_force_singlenode", + "output": "autodoc force singlenode config.toml: If True, will force AutoDoc to run in only the main server, not on remote workers in case of a multi-node setup" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autodoc_force_singlenode", + "output": "autodoc force singlenode config.toml: If True, will force AutoDoc to run in only the main server, not on remote workers in case of a multi-node setup" + 
}, + { + "prompt_type": "plain", + "instruction": ": What does vis_server_ip do? : vis server ip config.toml: IP address and port of autoviz process." + }, + { + "prompt_type": "plain", + "instruction": ": Explain vis_server_ip. : vis server ip config.toml: IP address and port of autoviz process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "vis_server_ip", + "output": "vis server ip config.toml: IP address and port of autoviz process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "vis_server_ip", + "output": "vis server ip config.toml: IP address and port of autoviz process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "vis server ip", + "output": "vis server ip config.toml: IP address and port of autoviz process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "vis server ip config.toml: IP address and port of autoviz process." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting vis_server_ip", + "output": "vis server ip config.toml: IP address and port of autoviz process." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting vis_server_ip", + "output": "vis server ip config.toml: IP address and port of autoviz process." + }, + { + "prompt_type": "plain", + "instruction": ": What does vis_server_port do? : vis server port config.toml: IP and port of autoviz process." + }, + { + "prompt_type": "plain", + "instruction": ": Explain vis_server_port. : vis server port config.toml: IP and port of autoviz process." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "vis_server_port", + "output": "vis server port config.toml: IP and port of autoviz process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "vis_server_port", + "output": "vis server port config.toml: IP and port of autoviz process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "vis server port", + "output": "vis server port config.toml: IP and port of autoviz process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "vis server port config.toml: IP and port of autoviz process." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting vis_server_port", + "output": "vis server port config.toml: IP and port of autoviz process." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting vis_server_port", + "output": "vis server port config.toml: IP and port of autoviz process." + }, + { + "prompt_type": "plain", + "instruction": ": What does autoviz_max_num_columns do? : autoviz max num columns config.toml: Maximum number of columns autoviz will work with. If dataset has more columns than this number, autoviz will pick columns randomly, prioritizing numerical columns " + }, + { + "prompt_type": "plain", + "instruction": ": Explain autoviz_max_num_columns. : autoviz max num columns config.toml: Maximum number of columns autoviz will work with. If dataset has more columns than this number, autoviz will pick columns randomly, prioritizing numerical columns " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum number of column for Autoviz: . 
: Set the autoviz max num columns config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autoviz_max_num_columns", + "output": "autoviz max num columns config.toml: Maximum number of columns autoviz will work with. If dataset has more columns than this number, autoviz will pick columns randomly, prioritizing numerical columns " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autoviz_max_num_columns", + "output": "autoviz max num columns config.toml: Maximum number of columns for Autoviz: Maximum number of columns autoviz will work with. If dataset has more columns than this number, autoviz will pick columns randomly, prioritizing numerical columns " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autoviz max num columns", + "output": "autoviz max num columns config.toml: Maximum number of columns for Autoviz: Maximum number of columns autoviz will work with. If dataset has more columns than this number, autoviz will pick columns randomly, prioritizing numerical columns " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of columns for Autoviz: ", + "output": "autoviz max num columns config.toml: Maximum number of columns for Autoviz: Maximum number of columns autoviz will work with. If dataset has more columns than this number, autoviz will pick columns randomly, prioritizing numerical columns " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autoviz_max_num_columns", + "output": "autoviz max num columns config.toml: Maximum number of columns autoviz will work with. 
If dataset has more columns than this number, autoviz will pick columns randomly, prioritizing numerical columns " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autoviz_max_num_columns", + "output": "autoviz max num columns config.toml: Maximum number of column for Autoviz: Maximum number of columns autoviz will work with. If dataset has more columns than this number, autoviz will pick columns randomly, prioritizing numerical columns " + }, + { + "prompt_type": "plain", + "instruction": ": What does autoviz_max_aggregated_rows do? : autoviz max aggregated rows config.toml: Maximum number of rows in aggregated frame: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain autoviz_max_aggregated_rows. : autoviz max aggregated rows config.toml: Maximum number of rows in aggregated frame: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autoviz_max_aggregated_rows", + "output": "autoviz max aggregated rows config.toml: Maximum number of rows in aggregated frame: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autoviz_max_aggregated_rows", + "output": "autoviz max aggregated rows config.toml: Maximum number of rows in aggregated frame: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autoviz max aggregated rows", + "output": "autoviz max aggregated rows config.toml: Maximum number of rows in aggregated frame: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of rows in aggregated frame: ", + "output": "autoviz max aggregated rows config.toml: Maximum number of rows in aggregated frame: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of 
the expert setting autoviz_max_aggregated_rows", + "output": "autoviz max aggregated rows config.toml: Maximum number of rows in aggregated frame: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autoviz_max_aggregated_rows", + "output": "autoviz max aggregated rows config.toml: Maximum number of rows in aggregated frame: " + }, + { + "prompt_type": "plain", + "instruction": ": What does autoviz_enable_recommendations do? : autoviz enable recommendations config.toml: When enabled, experiment will try to use feature transformations recommended by Autoviz" + }, + { + "prompt_type": "plain", + "instruction": ": Explain autoviz_enable_recommendations. : autoviz enable recommendations config.toml: When enabled, experiment will try to use feature transformations recommended by Autoviz" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Autoviz Use Recommended Transformations: . : Set the autoviz enable recommendations config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autoviz_enable_recommendations", + "output": "autoviz enable recommendations config.toml: When enabled, experiment will try to use feature transformations recommended by Autoviz" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autoviz_enable_recommendations", + "output": "autoviz enable recommendations config.toml: Autoviz Use Recommended Transformations: When enabled, experiment will try to use feature transformations recommended by Autoviz" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autoviz enable recommendations", + "output": "autoviz enable recommendations config.toml: Autoviz Use Recommended Transformations: When enabled, experiment will try to use feature transformations 
recommended by Autoviz" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Autoviz Use Recommended Transformations: ", + "output": "autoviz enable recommendations config.toml: Autoviz Use Recommended Transformations: When enabled, experiment will try to use feature transformations recommended by Autoviz" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autoviz_enable_recommendations", + "output": "autoviz enable recommendations config.toml: When enabled, experiment will try to use feature transformations recommended by Autoviz" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autoviz_enable_recommendations", + "output": "autoviz enable recommendations config.toml: Autoviz Use Recommended Transformations: When enabled, experiment will try to use feature transformations recommended by Autoviz" + }, + { + "prompt_type": "plain", + "instruction": ": What does autoviz_recommended_transformation do? : autoviz recommended transformation config.toml: Key-value pairs of column names, and transformations that Autoviz recommended" + }, + { + "prompt_type": "plain", + "instruction": ": Explain autoviz_recommended_transformation. : autoviz recommended transformation config.toml: Key-value pairs of column names, and transformations that Autoviz recommended" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Autoviz Recommended Transformations: . 
: Set the autoviz recommended transformation config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autoviz_recommended_transformation", + "output": "autoviz recommended transformation config.toml: Key-value pairs of column names, and transformations that Autoviz recommended" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autoviz_recommended_transformation", + "output": "autoviz recommended transformation config.toml: Autoviz Recommended Transformations: Key-value pairs of column names, and transformations that Autoviz recommended" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "autoviz recommended transformation", + "output": "autoviz recommended transformation config.toml: Autoviz Recommended Transformations: Key-value pairs of column names, and transformations that Autoviz recommended" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Autoviz Recommended Transformations: ", + "output": "autoviz recommended transformation config.toml: Autoviz Recommended Transformations: Key-value pairs of column names, and transformations that Autoviz recommended" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting autoviz_recommended_transformation", + "output": "autoviz recommended transformation config.toml: Key-value pairs of column names, and transformations that Autoviz recommended" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting autoviz_recommended_transformation", + "output": "autoviz recommended transformation config.toml: Autoviz Recommended Transformations: Key-value pairs of column names, and transformations that Autoviz recommended" + }, + 
{ + "prompt_type": "plain", + "instruction": ": What does enable_custom_recipes do? : enable custom recipes config.toml: Enable custom recipes." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_custom_recipes. : enable custom recipes config.toml: Enable custom recipes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_custom_recipes", + "output": "enable custom recipes config.toml: Enable custom recipes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_custom_recipes", + "output": "enable custom recipes config.toml: Enable custom recipes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable custom recipes", + "output": "enable custom recipes config.toml: Enable custom recipes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable custom recipes config.toml: Enable custom recipes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_custom_recipes", + "output": "enable custom recipes config.toml: Enable custom recipes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_custom_recipes", + "output": "enable custom recipes config.toml: Enable custom recipes." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_custom_recipes_upload do? : enable custom recipes upload config.toml: Enable uploading of custom recipes from local file system." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_custom_recipes_upload. : enable custom recipes upload config.toml: Enable uploading of custom recipes from local file system." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_custom_recipes_upload", + "output": "enable custom recipes upload config.toml: Enable uploading of custom recipes from local file system." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_custom_recipes_upload", + "output": "enable custom recipes upload config.toml: Enable uploading of custom recipes from local file system." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable custom recipes upload", + "output": "enable custom recipes upload config.toml: Enable uploading of custom recipes from local file system." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable custom recipes upload config.toml: Enable uploading of custom recipes from local file system." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_custom_recipes_upload", + "output": "enable custom recipes upload config.toml: Enable uploading of custom recipes from local file system." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_custom_recipes_upload", + "output": "enable custom recipes upload config.toml: Enable uploading of custom recipes from local file system." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_custom_recipes_from_url do? : enable custom recipes from url config.toml: Enable downloading of custom recipes from external URL." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_custom_recipes_from_url. : enable custom recipes from url config.toml: Enable downloading of custom recipes from external URL." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_custom_recipes_from_url", + "output": "enable custom recipes from url config.toml: Enable downloading of custom recipes from external URL." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_custom_recipes_from_url", + "output": "enable custom recipes from url config.toml: Enable downloading of custom recipes from external URL." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable custom recipes from url", + "output": "enable custom recipes from url config.toml: Enable downloading of custom recipes from external URL." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable custom recipes from url config.toml: Enable downloading of custom recipes from external URL." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_custom_recipes_from_url", + "output": "enable custom recipes from url config.toml: Enable downloading of custom recipes from external URL." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_custom_recipes_from_url", + "output": "enable custom recipes from url config.toml: Enable downloading of custom recipes from external URL." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_custom_recipes_from_zip do? : enable custom recipes from zip config.toml: Enable upload recipe files to be zip, containing custom recipe(s) in root folder, while any other code or auxillary files must be in some sub-folder. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_custom_recipes_from_zip. 
: enable custom recipes from zip config.toml: Enable upload recipe files to be zip, containing custom recipe(s) in root folder, while any other code or auxiliary files must be in some sub-folder. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_custom_recipes_from_zip", + "output": "enable custom recipes from zip config.toml: Enable upload recipe files to be zip, containing custom recipe(s) in root folder, while any other code or auxiliary files must be in some sub-folder. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_custom_recipes_from_zip", + "output": "enable custom recipes from zip config.toml: Enable upload recipe files to be zip, containing custom recipe(s) in root folder, while any other code or auxiliary files must be in some sub-folder. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable custom recipes from zip", + "output": "enable custom recipes from zip config.toml: Enable upload recipe files to be zip, containing custom recipe(s) in root folder, while any other code or auxiliary files must be in some sub-folder. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable custom recipes from zip config.toml: Enable upload recipe files to be zip, containing custom recipe(s) in root folder, while any other code or auxiliary files must be in some sub-folder. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_custom_recipes_from_zip", + "output": "enable custom recipes from zip config.toml: Enable upload recipe files to be zip, containing custom recipe(s) in root folder, while any other code or auxiliary files must be in some sub-folder. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_custom_recipes_from_zip", + "output": "enable custom recipes from zip config.toml: Enable upload recipe files to be zip, containing custom recipe(s) in root folder, while any other code or auxiliary files must be in some sub-folder. " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_recreate_custom_recipes_env do? : enable recreate custom recipes env config.toml: When set to true, it enables downloading custom recipes third party packages from the web, otherwise the python environment will be transferred from main worker." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_recreate_custom_recipes_env. : enable recreate custom recipes env config.toml: When set to true, it enables downloading custom recipes third party packages from the web, otherwise the python environment will be transferred from main worker." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_recreate_custom_recipes_env", + "output": "enable recreate custom recipes env config.toml: When set to true, it enables downloading custom recipes third party packages from the web, otherwise the python environment will be transferred from main worker." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_recreate_custom_recipes_env", + "output": "enable recreate custom recipes env config.toml: When set to true, it enables downloading custom recipes third party packages from the web, otherwise the python environment will be transferred from main worker." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable recreate custom recipes env", + "output": "enable recreate custom recipes env config.toml: When set to true, it enables downloading custom recipes third party packages from the web, otherwise the python environment will be transferred from main worker." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable recreate custom recipes env config.toml: When set to true, it enables downloading custom recipes third party packages from the web, otherwise the python environment will be transferred from main worker." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_recreate_custom_recipes_env", + "output": "enable recreate custom recipes env config.toml: When set to true, it enables downloading custom recipes third party packages from the web, otherwise the python environment will be transferred from main worker." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_recreate_custom_recipes_env", + "output": "enable recreate custom recipes env config.toml: When set to true, it enables downloading custom recipes third party packages from the web, otherwise the python environment will be transferred from main worker." + }, + { + "prompt_type": "plain", + "instruction": ": What does extra_migration_custom_recipes_missing_modules do? : extra migration custom recipes missing modules config.toml: Whether to enable extra attempt to migrate custom modules during preview to show preview. Can lead to slow preview loading.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain extra_migration_custom_recipes_missing_modules. 
: extra migration custom recipes missing modules config.toml: Whether to enable extra attempt to migrate custom modules during preview to show preview. Can lead to slow preview loading.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "extra_migration_custom_recipes_missing_modules", + "output": "extra migration custom recipes missing modules config.toml: Whether to enable extra attempt to migrate custom modules during preview to show preview. Can lead to slow preview loading.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "extra_migration_custom_recipes_missing_modules", + "output": "extra migration custom recipes missing modules config.toml: Whether to enable extra attempt to migrate custom modules during preview to show preview. Can lead to slow preview loading.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "extra migration custom recipes missing modules", + "output": "extra migration custom recipes missing modules config.toml: Whether to enable extra attempt to migrate custom modules during preview to show preview. Can lead to slow preview loading.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to enable extra attempt to migrate custom modules during preview to show preview. Can lead to slow preview loading.: ", + "output": "extra migration custom recipes missing modules config.toml: Whether to enable extra attempt to migrate custom modules during preview to show preview. 
Can lead to slow preview loading.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting extra_migration_custom_recipes_missing_modules", + "output": "extra migration custom recipes missing modules config.toml: Whether to enable extra attempt to migrate custom modules during preview to show preview. Can lead to slow preview loading.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting extra_migration_custom_recipes_missing_modules", + "output": "extra migration custom recipes missing modules config.toml: Whether to enable extra attempt to migrate custom modules during preview to show preview. Can lead to slow preview loading.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does include_custom_recipes_by_default do? : include custom recipes by default config.toml: Include custom recipes in default inclusion lists (warning: enables all custom recipes)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain include_custom_recipes_by_default. 
: include custom recipes by default config.toml: Include custom recipes in default inclusion lists (warning: enables all custom recipes)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "include_custom_recipes_by_default", + "output": "include custom recipes by default config.toml: Include custom recipes in default inclusion lists (warning: enables all custom recipes)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "include_custom_recipes_by_default", + "output": "include custom recipes by default config.toml: Include custom recipes in default inclusion lists (warning: enables all custom recipes)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "include custom recipes by default", + "output": "include custom recipes by default config.toml: Include custom recipes in default inclusion lists (warning: enables all custom recipes)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "include custom recipes by default config.toml: Include custom recipes in default inclusion lists (warning: enables all custom recipes)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting include_custom_recipes_by_default", + "output": "include custom recipes by default config.toml: Include custom recipes in default inclusion lists (warning: enables all custom recipes)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting include_custom_recipes_by_default", + "output": "include custom recipes by default config.toml: Include custom recipes in default inclusion lists (warning: enables all custom recipes)" + }, + { + "prompt_type": "plain", + "instruction": ": What 
does enable_h2o_recipes do? : enable h2o recipes config.toml: Whether to enable use of H2O recipe server. In some cases, recipe server (started at DAI startup) may enter into an unstable state, and this might affect other experiments. Then one can avoid triggering use of the recipe server by setting this to false." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_h2o_recipes. : enable h2o recipes config.toml: Whether to enable use of H2O recipe server. In some cases, recipe server (started at DAI startup) may enter into an unstable state, and this might affect other experiments. Then one can avoid triggering use of the recipe server by setting this to false." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable h2o recipes server: . : Set the enable h2o recipes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_h2o_recipes", + "output": "enable h2o recipes config.toml: Whether to enable use of H2O recipe server. In some cases, recipe server (started at DAI startup) may enter into an unstable state, and this might affect other experiments. Then one can avoid triggering use of the recipe server by setting this to false." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_h2o_recipes", + "output": "enable h2o recipes config.toml: Enable h2o recipes server: Whether to enable use of H2O recipe server. In some cases, recipe server (started at DAI startup) may enter into an unstable state, and this might affect other experiments. Then one can avoid triggering use of the recipe server by setting this to false." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable h2o recipes", + "output": "enable h2o recipes config.toml: Enable h2o recipes server: Whether to enable use of H2O recipe server. In some cases, recipe server (started at DAI startup) may enter into an unstable state, and this might affect other experiments. Then one can avoid triggering use of the recipe server by setting this to false." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable h2o recipes server: ", + "output": "enable h2o recipes config.toml: Enable h2o recipes server: Whether to enable use of H2O recipe server. In some cases, recipe server (started at DAI startup) may enter into an unstable state, and this might affect other experiments. Then one can avoid triggering use of the recipe server by setting this to false." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_h2o_recipes", + "output": "enable h2o recipes config.toml: Whether to enable use of H2O recipe server. In some cases, recipe server (started at DAI startup) may enter into an unstable state, and this might affect other experiments. Then one can avoid triggering use of the recipe server by setting this to false." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_h2o_recipes", + "output": "enable h2o recipes config.toml: Enable h2o recipes server: Whether to enable use of H2O recipe server. In some cases, recipe server (started at DAI startup) may enter into an unstable state, and this might affect other experiments. Then one can avoid triggering use of the recipe server by setting this to false." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_recipes_url do? 
: h2o recipes url config.toml: URL of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_recipes_url. : h2o recipes url config.toml: URL of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_url", + "output": "h2o recipes url config.toml: URL of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_url", + "output": "h2o recipes url config.toml: URL of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o recipes url", + "output": "h2o recipes url config.toml: URL of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o recipes url config.toml: URL of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_recipes_url", + "output": "h2o recipes url config.toml: URL of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_recipes_url", + "output": "h2o recipes url config.toml: URL of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_recipes_ip do? : h2o recipes ip config.toml: IP of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_recipes_ip. 
: h2o recipes ip config.toml: IP of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_ip", + "output": "h2o recipes ip config.toml: IP of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_ip", + "output": "h2o recipes ip config.toml: IP of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o recipes ip", + "output": "h2o recipes ip config.toml: IP of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o recipes ip config.toml: IP of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_recipes_ip", + "output": "h2o recipes ip config.toml: IP of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_recipes_ip", + "output": "h2o recipes ip config.toml: IP of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_recipes_port do? : h2o recipes port config.toml: Port of H2O instance for use by transformers, models, or scorers. No other instances must be on that port or on next port." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_recipes_port. : h2o recipes port config.toml: Port of H2O instance for use by transformers, models, or scorers. 
No other instances must be on that port or on next port." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_port", + "output": "h2o recipes port config.toml: Port of H2O instance for use by transformers, models, or scorers. No other instances must be on that port or on next port." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_port", + "output": "h2o recipes port config.toml: Port of H2O instance for use by transformers, models, or scorers. No other instances must be on that port or on next port." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o recipes port", + "output": "h2o recipes port config.toml: Port of H2O instance for use by transformers, models, or scorers. No other instances must be on that port or on next port." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o recipes port config.toml: Port of H2O instance for use by transformers, models, or scorers. No other instances must be on that port or on next port." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_recipes_port", + "output": "h2o recipes port config.toml: Port of H2O instance for use by transformers, models, or scorers. No other instances must be on that port or on next port." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_recipes_port", + "output": "h2o recipes port config.toml: Port of H2O instance for use by transformers, models, or scorers. No other instances must be on that port or on next port." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_recipes_name do? 
: h2o recipes name config.toml: Name of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_recipes_name. : h2o recipes name config.toml: Name of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_name", + "output": "h2o recipes name config.toml: Name of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_name", + "output": "h2o recipes name config.toml: Name of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o recipes name", + "output": "h2o recipes name config.toml: Name of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o recipes name config.toml: Name of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_recipes_name", + "output": "h2o recipes name config.toml: Name of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_recipes_name", + "output": "h2o recipes name config.toml: Name of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_recipes_nthreads do? : h2o recipes nthreads config.toml: Number of threads for H2O instance for use by transformers, models, or scorers. -1 for all." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_recipes_nthreads. : h2o recipes nthreads config.toml: Number of threads for H2O instance for use by transformers, models, or scorers. -1 for all." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_nthreads", + "output": "h2o recipes nthreads config.toml: Number of threads for H2O instance for use by transformers, models, or scorers. -1 for all." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_nthreads", + "output": "h2o recipes nthreads config.toml: Number of threads for H2O instance for use by transformers, models, or scorers. -1 for all." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o recipes nthreads", + "output": "h2o recipes nthreads config.toml: Number of threads for H2O instance for use by transformers, models, or scorers. -1 for all." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o recipes nthreads config.toml: Number of threads for H2O instance for use by transformers, models, or scorers. -1 for all." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_recipes_nthreads", + "output": "h2o recipes nthreads config.toml: Number of threads for H2O instance for use by transformers, models, or scorers. -1 for all." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_recipes_nthreads", + "output": "h2o recipes nthreads config.toml: Number of threads for H2O instance for use by transformers, models, or scorers. -1 for all." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_recipes_log_level do? 
: h2o recipes log level config.toml: Log Level of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_recipes_log_level. : h2o recipes log level config.toml: Log Level of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_log_level", + "output": "h2o recipes log level config.toml: Log Level of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_log_level", + "output": "h2o recipes log level config.toml: Log Level of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o recipes log level", + "output": "h2o recipes log level config.toml: Log Level of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o recipes log level config.toml: Log Level of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_recipes_log_level", + "output": "h2o recipes log level config.toml: Log Level of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_recipes_log_level", + "output": "h2o recipes log level config.toml: Log Level of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_recipes_max_mem_size do? 
: h2o recipes max mem size config.toml: Maximum memory size of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_recipes_max_mem_size. : h2o recipes max mem size config.toml: Maximum memory size of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_max_mem_size", + "output": "h2o recipes max mem size config.toml: Maximum memory size of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_max_mem_size", + "output": "h2o recipes max mem size config.toml: Maximum memory size of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o recipes max mem size", + "output": "h2o recipes max mem size config.toml: Maximum memory size of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o recipes max mem size config.toml: Maximum memory size of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_recipes_max_mem_size", + "output": "h2o recipes max mem size config.toml: Maximum memory size of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_recipes_max_mem_size", + "output": "h2o recipes max mem size config.toml: Maximum memory size of H2O instance for use by transformers, models, or scorers." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_recipes_min_mem_size do? : h2o recipes min mem size config.toml: Minimum memory size of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_recipes_min_mem_size. : h2o recipes min mem size config.toml: Minimum memory size of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_min_mem_size", + "output": "h2o recipes min mem size config.toml: Minimum memory size of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_min_mem_size", + "output": "h2o recipes min mem size config.toml: Minimum memory size of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o recipes min mem size", + "output": "h2o recipes min mem size config.toml: Minimum memory size of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o recipes min mem size config.toml: Minimum memory size of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_recipes_min_mem_size", + "output": "h2o recipes min mem size config.toml: Minimum memory size of H2O instance for use by transformers, models, or scorers." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_recipes_min_mem_size", + "output": "h2o recipes min mem size config.toml: Minimum memory size of H2O instance for use by transformers, models, or scorers." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_recipes_kwargs do? : h2o recipes kwargs config.toml: General user overrides of kwargs dict to pass to h2o.init() for recipe server." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_recipes_kwargs. : h2o recipes kwargs config.toml: General user overrides of kwargs dict to pass to h2o.init() for recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_kwargs", + "output": "h2o recipes kwargs config.toml: General user overrides of kwargs dict to pass to h2o.init() for recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_kwargs", + "output": "h2o recipes kwargs config.toml: General user overrides of kwargs dict to pass to h2o.init() for recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o recipes kwargs", + "output": "h2o recipes kwargs config.toml: General user overrides of kwargs dict to pass to h2o.init() for recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o recipes kwargs config.toml: General user overrides of kwargs dict to pass to h2o.init() for recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_recipes_kwargs", + "output": "h2o recipes kwargs config.toml: General user overrides of kwargs dict to pass to h2o.init() for recipe server." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_recipes_kwargs", + "output": "h2o recipes kwargs config.toml: General user overrides of kwargs dict to pass to h2o.init() for recipe server." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_recipes_start_trials do? : h2o recipes start trials config.toml: Number of trials to give h2o-3 recipe server to start." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_recipes_start_trials. : h2o recipes start trials config.toml: Number of trials to give h2o-3 recipe server to start." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_start_trials", + "output": "h2o recipes start trials config.toml: Number of trials to give h2o-3 recipe server to start." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_start_trials", + "output": "h2o recipes start trials config.toml: Number of trials to give h2o-3 recipe server to start." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o recipes start trials", + "output": "h2o recipes start trials config.toml: Number of trials to give h2o-3 recipe server to start." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o recipes start trials config.toml: Number of trials to give h2o-3 recipe server to start." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_recipes_start_trials", + "output": "h2o recipes start trials config.toml: Number of trials to give h2o-3 recipe server to start." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_recipes_start_trials", + "output": "h2o recipes start trials config.toml: Number of trials to give h2o-3 recipe server to start." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_recipes_start_sleep0 do? : h2o recipes start sleep0 config.toml: Number of seconds to sleep before starting h2o-3 recipe server." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_recipes_start_sleep0. : h2o recipes start sleep0 config.toml: Number of seconds to sleep before starting h2o-3 recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_start_sleep0", + "output": "h2o recipes start sleep0 config.toml: Number of seconds to sleep before starting h2o-3 recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_start_sleep0", + "output": "h2o recipes start sleep0 config.toml: Number of seconds to sleep before starting h2o-3 recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o recipes start sleep0", + "output": "h2o recipes start sleep0 config.toml: Number of seconds to sleep before starting h2o-3 recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o recipes start sleep0 config.toml: Number of seconds to sleep before starting h2o-3 recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_recipes_start_sleep0", + "output": "h2o recipes start sleep0 config.toml: Number of seconds to sleep before starting h2o-3 recipe server." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_recipes_start_sleep0", + "output": "h2o recipes start sleep0 config.toml: Number of seconds to sleep before starting h2o-3 recipe server." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_recipes_start_sleep do? : h2o recipes start sleep config.toml: Number of seconds to sleep between trials of starting h2o-3 recipe server." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_recipes_start_sleep. : h2o recipes start sleep config.toml: Number of seconds to sleep between trials of starting h2o-3 recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_start_sleep", + "output": "h2o recipes start sleep config.toml: Number of seconds to sleep between trials of starting h2o-3 recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_recipes_start_sleep", + "output": "h2o recipes start sleep config.toml: Number of seconds to sleep between trials of starting h2o-3 recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o recipes start sleep", + "output": "h2o recipes start sleep config.toml: Number of seconds to sleep between trials of starting h2o-3 recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o recipes start sleep config.toml: Number of seconds to sleep between trials of starting h2o-3 recipe server." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_recipes_start_sleep", + "output": "h2o recipes start sleep config.toml: Number of seconds to sleep between trials of starting h2o-3 recipe server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_recipes_start_sleep", + "output": "h2o recipes start sleep config.toml: Number of seconds to sleep between trials of starting h2o-3 recipe server." + }, + { + "prompt_type": "plain", + "instruction": ": What does custom_recipes_lock_to_git_repo do? : custom recipes lock to git repo config.toml: Lock source for recipes to a specific github repo. If True then all custom recipes must come from the repo specified in setting: custom_recipes_git_repo" + }, + { + "prompt_type": "plain", + "instruction": ": Explain custom_recipes_lock_to_git_repo. : custom recipes lock to git repo config.toml: Lock source for recipes to a specific github repo. If True then all custom recipes must come from the repo specified in setting: custom_recipes_git_repo" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipes_lock_to_git_repo", + "output": "custom recipes lock to git repo config.toml: Lock source for recipes to a specific github repo. If True then all custom recipes must come from the repo specified in setting: custom_recipes_git_repo" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipes_lock_to_git_repo", + "output": "custom recipes lock to git repo config.toml: Lock source for recipes to a specific github repo. 
If True then all custom recipes must come from the repo specified in setting: custom_recipes_git_repo" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom recipes lock to git repo", + "output": "custom recipes lock to git repo config.toml: Lock source for recipes to a specific github repo. If True then all custom recipes must come from the repo specified in setting: custom_recipes_git_repo" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "custom recipes lock to git repo config.toml: Lock source for recipes to a specific github repo. If True then all custom recipes must come from the repo specified in setting: custom_recipes_git_repo" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting custom_recipes_lock_to_git_repo", + "output": "custom recipes lock to git repo config.toml: Lock source for recipes to a specific github repo. If True then all custom recipes must come from the repo specified in setting: custom_recipes_git_repo" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting custom_recipes_lock_to_git_repo", + "output": "custom recipes lock to git repo config.toml: Lock source for recipes to a specific github repo. If True then all custom recipes must come from the repo specified in setting: custom_recipes_git_repo" + }, + { + "prompt_type": "plain", + "instruction": ": What does custom_recipes_git_repo do? : custom recipes git repo config.toml: If custom_recipes_lock_to_git_repo is set to True, only this repo can be used to pull recipes from" + }, + { + "prompt_type": "plain", + "instruction": ": Explain custom_recipes_git_repo. 
: custom recipes git repo config.toml: If custom_recipes_lock_to_git_repo is set to True, only this repo can be used to pull recipes from" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipes_git_repo", + "output": "custom recipes git repo config.toml: If custom_recipes_lock_to_git_repo is set to True, only this repo can be used to pull recipes from" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipes_git_repo", + "output": "custom recipes git repo config.toml: If custom_recipes_lock_to_git_repo is set to True, only this repo can be used to pull recipes from" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom recipes git repo", + "output": "custom recipes git repo config.toml: If custom_recipes_lock_to_git_repo is set to True, only this repo can be used to pull recipes from" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "custom recipes git repo config.toml: If custom_recipes_lock_to_git_repo is set to True, only this repo can be used to pull recipes from" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting custom_recipes_git_repo", + "output": "custom recipes git repo config.toml: If custom_recipes_lock_to_git_repo is set to True, only this repo can be used to pull recipes from" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting custom_recipes_git_repo", + "output": "custom recipes git repo config.toml: If custom_recipes_lock_to_git_repo is set to True, only this repo can be used to pull recipes from" + }, + { + "prompt_type": "plain", + "instruction": ": What does custom_recipes_git_branch do? 
: custom recipes git branch config.toml: Branch constraint for recipe source repo. Any branch allowed if unset or None" + }, + { + "prompt_type": "plain", + "instruction": ": Explain custom_recipes_git_branch. : custom recipes git branch config.toml: Branch constraint for recipe source repo. Any branch allowed if unset or None" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipes_git_branch", + "output": "custom recipes git branch config.toml: Branch constraint for recipe source repo. Any branch allowed if unset or None" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipes_git_branch", + "output": "custom recipes git branch config.toml: Branch constraint for recipe source repo. Any branch allowed if unset or None" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom recipes git branch", + "output": "custom recipes git branch config.toml: Branch constraint for recipe source repo. Any branch allowed if unset or None" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "custom recipes git branch config.toml: Branch constraint for recipe source repo. Any branch allowed if unset or None" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting custom_recipes_git_branch", + "output": "custom recipes git branch config.toml: Branch constraint for recipe source repo. Any branch allowed if unset or None" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting custom_recipes_git_branch", + "output": "custom recipes git branch config.toml: Branch constraint for recipe source repo. 
Any branch allowed if unset or None" + }, + { + "prompt_type": "plain", + "instruction": ": What does custom_recipes_excluded_filenames_from_repo_download do? : custom recipes excluded filenames from repo download config.toml: basenames of files to exclude from repo download: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain custom_recipes_excluded_filenames_from_repo_download. : custom recipes excluded filenames from repo download config.toml: basenames of files to exclude from repo download: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipes_excluded_filenames_from_repo_download", + "output": "custom recipes excluded filenames from repo download config.toml: basenames of files to exclude from repo download: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipes_excluded_filenames_from_repo_download", + "output": "custom recipes excluded filenames from repo download config.toml: basenames of files to exclude from repo download: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom recipes excluded filenames from repo download", + "output": "custom recipes excluded filenames from repo download config.toml: basenames of files to exclude from repo download: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "basenames of files to exclude from repo download: ", + "output": "custom recipes excluded filenames from repo download config.toml: basenames of files to exclude from repo download: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting custom_recipes_excluded_filenames_from_repo_download", + "output": "custom recipes excluded filenames from repo download 
config.toml: basenames of files to exclude from repo download: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting custom_recipes_excluded_filenames_from_repo_download", + "output": "custom recipes excluded filenames from repo download config.toml: basenames of files to exclude from repo download: " + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_old_recipes_use_datadir_as_data_directory do? : allow old recipes use datadir as data directory config.toml: Allow use of deprecated get_global_directory() method from custom recipes for backward compatibility of recipes created before 1.9.0. Disable to force separation of custom recipes per user (in which case user_dir() should be used instead).: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_old_recipes_use_datadir_as_data_directory. : allow old recipes use datadir as data directory config.toml: Allow use of deprecated get_global_directory() method from custom recipes for backward compatibility of recipes created before 1.9.0. Disable to force separation of custom recipes per user (in which case user_dir() should be used instead).: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_old_recipes_use_datadir_as_data_directory", + "output": "allow old recipes use datadir as data directory config.toml: Allow use of deprecated get_global_directory() method from custom recipes for backward compatibility of recipes created before 1.9.0. 
Disable to force separation of custom recipes per user (in which case user_dir() should be used instead).: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_old_recipes_use_datadir_as_data_directory", + "output": "allow old recipes use datadir as data directory config.toml: Allow use of deprecated get_global_directory() method from custom recipes for backward compatibility of recipes created before 1.9.0. Disable to force separation of custom recipes per user (in which case user_dir() should be used instead).: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow old recipes use datadir as data directory", + "output": "allow old recipes use datadir as data directory config.toml: Allow use of deprecated get_global_directory() method from custom recipes for backward compatibility of recipes created before 1.9.0. Disable to force separation of custom recipes per user (in which case user_dir() should be used instead).: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Allow use of deprecated get_global_directory() method from custom recipes for backward compatibility of recipes created before 1.9.0. Disable to force separation of custom recipes per user (in which case user_dir() should be used instead).: ", + "output": "allow old recipes use datadir as data directory config.toml: Allow use of deprecated get_global_directory() method from custom recipes for backward compatibility of recipes created before 1.9.0. 
Disable to force separation of custom recipes per user (in which case user_dir() should be used instead).: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_old_recipes_use_datadir_as_data_directory", + "output": "allow old recipes use datadir as data directory config.toml: Allow use of deprecated get_global_directory() method from custom recipes for backward compatibility of recipes created before 1.9.0. Disable to force separation of custom recipes per user (in which case user_dir() should be used instead).: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allow_old_recipes_use_datadir_as_data_directory", + "output": "allow old recipes use datadir as data directory config.toml: Allow use of deprecated get_global_directory() method from custom recipes for backward compatibility of recipes created before 1.9.0. Disable to force separation of custom recipes per user (in which case user_dir() should be used instead).: " + }, + { + "prompt_type": "plain", + "instruction": ": What does last_recipe do? : last recipe config.toml: Internal helper to allow memory of if changed recipe" + }, + { + "prompt_type": "plain", + "instruction": ": Explain last_recipe. 
: last recipe config.toml: Internal helper to allow memory of if changed recipe" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "last_recipe", + "output": "last recipe config.toml: Internal helper to allow memory of if changed recipe" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "last_recipe", + "output": "last recipe config.toml: Internal helper to allow memory of if changed recipe" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "last recipe", + "output": "last recipe config.toml: Internal helper to allow memory of if changed recipe" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "last recipe config.toml: Internal helper to allow memory of if changed recipe" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting last_recipe", + "output": "last recipe config.toml: Internal helper to allow memory of if changed recipe" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting last_recipe", + "output": "last recipe config.toml: Internal helper to allow memory of if changed recipe" + }, + { + "prompt_type": "plain", + "instruction": ": What does recipe_dict do? : recipe dict config.toml: Dictionary to control recipes for each experiment and particular custom recipes. E.g. if inserting into the GUI as any toml string, can use: \"\"recipe_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. if putting into config.toml as a dict, can use: recipe_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "plain", + "instruction": ": Explain recipe_dict. 
: recipe dict config.toml: Dictionary to control recipes for each experiment and particular custom recipes. E.g. if inserting into the GUI as any toml string, can use: \"\"recipe_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. if putting into config.toml as a dict, can use: recipe_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "recipe_dict", + "output": "recipe dict config.toml: Dictionary to control recipes for each experiment and particular custom recipes. E.g. if inserting into the GUI as any toml string, can use: \"\"recipe_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. if putting into config.toml as a dict, can use: recipe_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "recipe_dict", + "output": "recipe dict config.toml: Dictionary to control recipes for each experiment and particular custom recipes. E.g. if inserting into the GUI as any toml string, can use: \"\"recipe_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. if putting into config.toml as a dict, can use: recipe_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "recipe dict", + "output": "recipe dict config.toml: Dictionary to control recipes for each experiment and particular custom recipes. E.g. if inserting into the GUI as any toml string, can use: \"\"recipe_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. if putting into config.toml as a dict, can use: recipe_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "recipe dict config.toml: Dictionary to control recipes for each experiment and particular custom recipes. E.g. 
if inserting into the GUI as any toml string, can use: \"\"recipe_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. if putting into config.toml as a dict, can use: recipe_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting recipe_dict", + "output": "recipe dict config.toml: Dictionary to control recipes for each experiment and particular custom recipes. E.g. if inserting into the GUI as any toml string, can use: \"\"recipe_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. if putting into config.toml as a dict, can use: recipe_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting recipe_dict", + "output": "recipe dict config.toml: Dictionary to control recipes for each experiment and particular custom recipes. E.g. if inserting into the GUI as any toml string, can use: \"\"recipe_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. if putting into config.toml as a dict, can use: recipe_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "plain", + "instruction": ": What does mutation_dict do? : mutation dict config.toml: Dictionary to control some mutation parameters. E.g. if inserting into the GUI as any toml string, can use: \"\"mutation_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. if putting into config.toml as a dict, can use: mutation_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "plain", + "instruction": ": Explain mutation_dict. : mutation dict config.toml: Dictionary to control some mutation parameters. E.g. if inserting into the GUI as any toml string, can use: \"\"mutation_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. 
if putting into config.toml as a dict, can use: mutation_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mutation_dict", + "output": "mutation dict config.toml: Dictionary to control some mutation parameters. E.g. if inserting into the GUI as any toml string, can use: \"\"mutation_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. if putting into config.toml as a dict, can use: mutation_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mutation_dict", + "output": "mutation dict config.toml: Dictionary to control some mutation parameters. E.g. if inserting into the GUI as any toml string, can use: \"\"mutation_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. if putting into config.toml as a dict, can use: mutation_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mutation dict", + "output": "mutation dict config.toml: Dictionary to control some mutation parameters. E.g. if inserting into the GUI as any toml string, can use: \"\"mutation_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. if putting into config.toml as a dict, can use: mutation_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mutation dict config.toml: Dictionary to control some mutation parameters. E.g. if inserting into the GUI as any toml string, can use: \"\"mutation_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. 
if putting into config.toml as a dict, can use: mutation_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mutation_dict", + "output": "mutation dict config.toml: Dictionary to control some mutation parameters. E.g. if inserting into the GUI as any toml string, can use: \"\"mutation_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. if putting into config.toml as a dict, can use: mutation_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mutation_dict", + "output": "mutation dict config.toml: Dictionary to control some mutation parameters. E.g. if inserting into the GUI as any toml string, can use: \"\"mutation_dict=\"{'key1': 2, 'key2': 'value2'}\"\"\" E.g. if putting into config.toml as a dict, can use: mutation_dict=\"{'key1': 2, 'key2': 'value2'}\" " + }, + { + "prompt_type": "plain", + "instruction": ": What does raise_on_invalid_included_list do? : raise on invalid included list config.toml: Whether to validate recipe names provided in included lists, like included_models, or (if False) whether to just log warning to server logs and ignore any invalid names of recipes. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain raise_on_invalid_included_list. : raise on invalid included list config.toml: Whether to validate recipe names provided in included lists, like included_models, or (if False) whether to just log warning to server logs and ignore any invalid names of recipes. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to validate recipe names: . 
: Set the raise on invalid included list config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "raise_on_invalid_included_list", + "output": "raise on invalid included list config.toml: Whether to validate recipe names provided in included lists, like included_models, or (if False) whether to just log warning to server logs and ignore any invalid names of recipes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "raise_on_invalid_included_list", + "output": "raise on invalid included list config.toml: Whether to validate recipe names: Whether to validate recipe names provided in included lists, like included_models, or (if False) whether to just log warning to server logs and ignore any invalid names of recipes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "raise on invalid included list", + "output": "raise on invalid included list config.toml: Whether to validate recipe names: Whether to validate recipe names provided in included lists, like included_models, or (if False) whether to just log warning to server logs and ignore any invalid names of recipes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to validate recipe names: ", + "output": "raise on invalid included list config.toml: Whether to validate recipe names: Whether to validate recipe names provided in included lists, like included_models, or (if False) whether to just log warning to server logs and ignore any invalid names of recipes. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting raise_on_invalid_included_list", + "output": "raise on invalid included list config.toml: Whether to validate recipe names provided in included lists, like included_models, or (if False) whether to just log warning to server logs and ignore any invalid names of recipes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting raise_on_invalid_included_list", + "output": "raise on invalid included list config.toml: Whether to validate recipe names: Whether to validate recipe names provided in included lists, like included_models, or (if False) whether to just log warning to server logs and ignore any invalid names of recipes. " + }, + { + "prompt_type": "plain", + "instruction": ": What does contrib_relative_directory do? : contrib relative directory config.toml: Base directory for recipes within data directory.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain contrib_relative_directory. 
: contrib relative directory config.toml: Base directory for recipes within data directory.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib_relative_directory", + "output": "contrib relative directory config.toml: Base directory for recipes within data directory.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib_relative_directory", + "output": "contrib relative directory config.toml: Base directory for recipes within data directory.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib relative directory", + "output": "contrib relative directory config.toml: Base directory for recipes within data directory.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Base directory for recipes within data directory.: ", + "output": "contrib relative directory config.toml: Base directory for recipes within data directory.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting contrib_relative_directory", + "output": "contrib relative directory config.toml: Base directory for recipes within data directory.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting contrib_relative_directory", + "output": "contrib relative directory config.toml: Base directory for recipes within data directory.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does contrib_env_relative_directory do? 
: contrib env relative directory config.toml: location of custom recipes packages installed (relative to data_directory) We will try to install packages dynamically, but can also do (before or after server started): (inside docker running docker instance if running docker, or as user server is running as (e.g. dai user) if deb/tar native installation: PYTHONPATH=//lib/python3.6/site-packages/ dai-env.sh python -m pip install --prefix=/ --upgrade --upgrade-strategy only-if-needed --log-file pip_log_file.log where is /opt/h2oai/dai/ for native rpm/deb installation Note can also install wheel files if is name of wheel file or archive. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain contrib_env_relative_directory. : contrib env relative directory config.toml: location of custom recipes packages installed (relative to data_directory) We will try to install packages dynamically, but can also do (before or after server started): (inside docker running docker instance if running docker, or as user server is running as (e.g. dai user) if deb/tar native installation: PYTHONPATH=//lib/python3.6/site-packages/ dai-env.sh python -m pip install --prefix=/ --upgrade --upgrade-strategy only-if-needed --log-file pip_log_file.log where is /opt/h2oai/dai/ for native rpm/deb installation Note can also install wheel files if is name of wheel file or archive. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib_env_relative_directory", + "output": "contrib env relative directory config.toml: location of custom recipes packages installed (relative to data_directory) We will try to install packages dynamically, but can also do (before or after server started): (inside docker running docker instance if running docker, or as user server is running as (e.g. 
dai user) if deb/tar native installation: PYTHONPATH=//lib/python3.6/site-packages/ dai-env.sh python -m pip install --prefix=/ --upgrade --upgrade-strategy only-if-needed --log-file pip_log_file.log where is /opt/h2oai/dai/ for native rpm/deb installation Note can also install wheel files if is name of wheel file or archive. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib_env_relative_directory", + "output": "contrib env relative directory config.toml: location of custom recipes packages installed (relative to data_directory) We will try to install packages dynamically, but can also do (before or after server started): (inside docker running docker instance if running docker, or as user server is running as (e.g. dai user) if deb/tar native installation: PYTHONPATH=//lib/python3.6/site-packages/ dai-env.sh python -m pip install --prefix=/ --upgrade --upgrade-strategy only-if-needed --log-file pip_log_file.log where is /opt/h2oai/dai/ for native rpm/deb installation Note can also install wheel files if is name of wheel file or archive. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib env relative directory", + "output": "contrib env relative directory config.toml: location of custom recipes packages installed (relative to data_directory) We will try to install packages dynamically, but can also do (before or after server started): (inside docker running docker instance if running docker, or as user server is running as (e.g. dai user) if deb/tar native installation: PYTHONPATH=//lib/python3.6/site-packages/ dai-env.sh python -m pip install --prefix=/ --upgrade --upgrade-strategy only-if-needed --log-file pip_log_file.log where is /opt/h2oai/dai/ for native rpm/deb installation Note can also install wheel files if is name of wheel file or archive. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "contrib env relative directory config.toml: location of custom recipes packages installed (relative to data_directory) We will try to install packages dynamically, but can also do (before or after server started): (inside docker running docker instance if running docker, or as user server is running as (e.g. dai user) if deb/tar native installation: PYTHONPATH=//lib/python3.6/site-packages/ dai-env.sh python -m pip install --prefix=/ --upgrade --upgrade-strategy only-if-needed --log-file pip_log_file.log where is /opt/h2oai/dai/ for native rpm/deb installation Note can also install wheel files if is name of wheel file or archive. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting contrib_env_relative_directory", + "output": "contrib env relative directory config.toml: location of custom recipes packages installed (relative to data_directory) We will try to install packages dynamically, but can also do (before or after server started): (inside docker running docker instance if running docker, or as user server is running as (e.g. dai user) if deb/tar native installation: PYTHONPATH=//lib/python3.6/site-packages/ dai-env.sh python -m pip install --prefix=/ --upgrade --upgrade-strategy only-if-needed --log-file pip_log_file.log where is /opt/h2oai/dai/ for native rpm/deb installation Note can also install wheel files if is name of wheel file or archive. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting contrib_env_relative_directory", + "output": "contrib env relative directory config.toml: location of custom recipes packages installed (relative to data_directory) We will try to install packages dynamically, but can also do (before or after server started): (inside docker running docker instance if running docker, or as user server is running as (e.g. dai user) if deb/tar native installation: PYTHONPATH=//lib/python3.6/site-packages/ dai-env.sh python -m pip install --prefix=/ --upgrade --upgrade-strategy only-if-needed --log-file pip_log_file.log where is /opt/h2oai/dai/ for native rpm/deb installation Note can also install wheel files if is name of wheel file or archive. " + }, + { + "prompt_type": "plain", + "instruction": ": What does ignore_package_version do? : ignore package version config.toml: List of package versions to ignore. Useful when small version change but likely to function still with old package version. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain ignore_package_version. : ignore package version config.toml: List of package versions to ignore. Useful when small version change but likely to function still with old package version. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ignore_package_version", + "output": "ignore package version config.toml: List of package versions to ignore. Useful when small version change but likely to function still with old package version. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ignore_package_version", + "output": "ignore package version config.toml: List of package versions to ignore. Useful when small version change but likely to function still with old package version. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ignore package version", + "output": "ignore package version config.toml: List of package versions to ignore. Useful when small version change but likely to function still with old package version. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ignore package version config.toml: List of package versions to ignore. Useful when small version change but likely to function still with old package version. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ignore_package_version", + "output": "ignore package version config.toml: List of package versions to ignore. Useful when small version change but likely to function still with old package version. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ignore_package_version", + "output": "ignore package version config.toml: List of package versions to ignore. Useful when small version change but likely to function still with old package version. " + }, + { + "prompt_type": "plain", + "instruction": ": What does clobber_package_version do? : clobber package version config.toml: List of package versions to remove if encounter conflict. Useful when want new version of package, and old recipes likely to function still. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain clobber_package_version. : clobber package version config.toml: List of package versions to remove if encounter conflict. Useful when want new version of package, and old recipes likely to function still. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "clobber_package_version", + "output": "clobber package version config.toml: List of package versions to remove if encounter conflict. Useful when want new version of package, and old recipes likely to function still. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "clobber_package_version", + "output": "clobber package version config.toml: List of package versions to remove if encounter conflict. Useful when want new version of package, and old recipes likely to function still. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "clobber package version", + "output": "clobber package version config.toml: List of package versions to remove if encounter conflict. Useful when want new version of package, and old recipes likely to function still. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "clobber package version config.toml: List of package versions to remove if encounter conflict. Useful when want new version of package, and old recipes likely to function still. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting clobber_package_version", + "output": "clobber package version config.toml: List of package versions to remove if encounter conflict. Useful when want new version of package, and old recipes likely to function still. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting clobber_package_version", + "output": "clobber package version config.toml: List of package versions to remove if encounter conflict. 
Useful when want new version of package, and old recipes likely to function still. " + }, + { + "prompt_type": "plain", + "instruction": ": What does swap_package_version do? : swap package version config.toml: List of package versions to remove if encounter conflict. Useful when want new version of package, and old recipes likely to function still. Also useful when do not need to use old versions of recipes even if they would no longer function. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain swap_package_version. : swap package version config.toml: List of package versions to remove if encounter conflict. Useful when want new version of package, and old recipes likely to function still. Also useful when do not need to use old versions of recipes even if they would no longer function. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "swap_package_version", + "output": "swap package version config.toml: List of package versions to remove if encounter conflict. Useful when want new version of package, and old recipes likely to function still. Also useful when do not need to use old versions of recipes even if they would no longer function. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "swap_package_version", + "output": "swap package version config.toml: List of package versions to remove if encounter conflict. Useful when want new version of package, and old recipes likely to function still. Also useful when do not need to use old versions of recipes even if they would no longer function. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "swap package version", + "output": "swap package version config.toml: List of package versions to remove if encounter conflict. 
Useful when want new version of package, and old recipes likely to function still. Also useful when do not need to use old versions of recipes even if they would no longer function. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "swap package version config.toml: List of package versions to remove if encounter conflict. Useful when want new version of package, and old recipes likely to function still. Also useful when do not need to use old versions of recipes even if they would no longer function. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting swap_package_version", + "output": "swap package version config.toml: List of package versions to remove if encounter conflict. Useful when want new version of package, and old recipes likely to function still. Also useful when do not need to use old versions of recipes even if they would no longer function. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting swap_package_version", + "output": "swap package version config.toml: List of package versions to remove if encounter conflict. Useful when want new version of package, and old recipes likely to function still. Also useful when do not need to use old versions of recipes even if they would no longer function. " + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_version_change_user_packages do? : allow version change user packages config.toml: If user uploads recipe with changes to package versions, allow upgrade of package versions. If DAI protected packages are attempted to be changed, can try using pip_install_options toml with ['--no-deps']. Or to ignore entirely DAI versions of packages, can try using pip_install_options toml with ['--ignore-installed']. 
Any other experiments relying on recipes with such packages will be affected, use with caution." + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_version_change_user_packages. : allow version change user packages config.toml: If user uploads recipe with changes to package versions, allow upgrade of package versions. If DAI protected packages are attempted to be changed, can try using pip_install_options toml with ['--no-deps']. Or to ignore entirely DAI versions of packages, can try using pip_install_options toml with ['--ignore-installed']. Any other experiments relying on recipes with such packages will be affected, use with caution." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_version_change_user_packages", + "output": "allow version change user packages config.toml: If user uploads recipe with changes to package versions, allow upgrade of package versions. If DAI protected packages are attempted to be changed, can try using pip_install_options toml with ['--no-deps']. Or to ignore entirely DAI versions of packages, can try using pip_install_options toml with ['--ignore-installed']. Any other experiments relying on recipes with such packages will be affected, use with caution." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_version_change_user_packages", + "output": "allow version change user packages config.toml: If user uploads recipe with changes to package versions, allow upgrade of package versions. If DAI protected packages are attempted to be changed, can try using pip_install_options toml with ['--no-deps']. Or to ignore entirely DAI versions of packages, can try using pip_install_options toml with ['--ignore-installed']. Any other experiments relying on recipes with such packages will be affected, use with caution." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow version change user packages", + "output": "allow version change user packages config.toml: If user uploads recipe with changes to package versions, allow upgrade of package versions. If DAI protected packages are attempted to be changed, can try using pip_install_options toml with ['--no-deps']. Or to ignore entirely DAI versions of packages, can try using pip_install_options toml with ['--ignore-installed']. Any other experiments relying on recipes with such packages will be affected, use with caution." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "allow version change user packages config.toml: If user uploads recipe with changes to package versions, allow upgrade of package versions. If DAI protected packages are attempted to be changed, can try using pip_install_options toml with ['--no-deps']. Or to ignore entirely DAI versions of packages, can try using pip_install_options toml with ['--ignore-installed']. Any other experiments relying on recipes with such packages will be affected, use with caution." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_version_change_user_packages", + "output": "allow version change user packages config.toml: If user uploads recipe with changes to package versions, allow upgrade of package versions. If DAI protected packages are attempted to be changed, can try using pip_install_options toml with ['--no-deps']. Or to ignore entirely DAI versions of packages, can try using pip_install_options toml with ['--ignore-installed']. Any other experiments relying on recipes with such packages will be affected, use with caution." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allow_version_change_user_packages", + "output": "allow version change user packages config.toml: If user uploads recipe with changes to package versions, allow upgrade of package versions. If DAI protected packages are attempted to be changed, can try using pip_install_options toml with ['--no-deps']. Or to ignore entirely DAI versions of packages, can try using pip_install_options toml with ['--ignore-installed']. Any other experiments relying on recipes with such packages will be affected, use with caution." + }, + { + "prompt_type": "plain", + "instruction": ": What does pip_install_overall_retries do? : pip install overall retries config.toml: pip install retry for call to pip. Sometimes need to try twice" + }, + { + "prompt_type": "plain", + "instruction": ": Explain pip_install_overall_retries. : pip install overall retries config.toml: pip install retry for call to pip. Sometimes need to try twice" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip_install_overall_retries", + "output": "pip install overall retries config.toml: pip install retry for call to pip. Sometimes need to try twice" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip_install_overall_retries", + "output": "pip install overall retries config.toml: pip install retry for call to pip. Sometimes need to try twice" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip install overall retries", + "output": "pip install overall retries config.toml: pip install retry for call to pip. 
Sometimes need to try twice" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "pip install overall retries config.toml: pip install retry for call to pip. Sometimes need to try twice" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pip_install_overall_retries", + "output": "pip install overall retries config.toml: pip install retry for call to pip. Sometimes need to try twice" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pip_install_overall_retries", + "output": "pip install overall retries config.toml: pip install retry for call to pip. Sometimes need to try twice" + }, + { + "prompt_type": "plain", + "instruction": ": What does pip_install_verbosity do? : pip install verbosity config.toml: pip install verbosity level (number of -v's given to pip, up to 3" + }, + { + "prompt_type": "plain", + "instruction": ": Explain pip_install_verbosity. 
: pip install verbosity config.toml: pip install verbosity level (number of -v's given to pip, up to 3)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip_install_verbosity", + "output": "pip install verbosity config.toml: pip install verbosity level (number of -v's given to pip, up to 3)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip_install_verbosity", + "output": "pip install verbosity config.toml: pip install verbosity level (number of -v's given to pip, up to 3)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip install verbosity", + "output": "pip install verbosity config.toml: pip install verbosity level (number of -v's given to pip, up to 3)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "pip install verbosity config.toml: pip install verbosity level (number of -v's given to pip, up to 3)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pip_install_verbosity", + "output": "pip install verbosity config.toml: pip install verbosity level (number of -v's given to pip, up to 3)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pip_install_verbosity", + "output": "pip install verbosity config.toml: pip install verbosity level (number of -v's given to pip, up to 3)" + }, + { + "prompt_type": "plain", + "instruction": ": What does pip_install_timeout do? : pip install timeout config.toml: pip install timeout in seconds. Sometimes internet issues would mean want to fail faster" + }, + { + "prompt_type": "plain", + "instruction": ": Explain pip_install_timeout. 
: pip install timeout config.toml: pip install timeout in seconds. Sometimes internet issues would mean want to fail faster" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip_install_timeout", + "output": "pip install timeout config.toml: pip install timeout in seconds. Sometimes internet issues would mean want to fail faster" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip_install_timeout", + "output": "pip install timeout config.toml: pip install timeout in seconds. Sometimes internet issues would mean want to fail faster" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip install timeout", + "output": "pip install timeout config.toml: pip install timeout in seconds. Sometimes internet issues would mean want to fail faster" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "pip install timeout config.toml: pip install timeout in seconds. Sometimes internet issues would mean want to fail faster" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pip_install_timeout", + "output": "pip install timeout config.toml: pip install timeout in seconds. Sometimes internet issues would mean want to fail faster" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pip_install_timeout", + "output": "pip install timeout config.toml: pip install timeout in seconds. Sometimes internet issues would mean want to fail faster" + }, + { + "prompt_type": "plain", + "instruction": ": What does pip_install_retries do? 
: pip install retries config.toml: pip install retry count" + }, + { + "prompt_type": "plain", + "instruction": ": Explain pip_install_retries. : pip install retries config.toml: pip install retry count" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip_install_retries", + "output": "pip install retries config.toml: pip install retry count" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip_install_retries", + "output": "pip install retries config.toml: pip install retry count" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip install retries", + "output": "pip install retries config.toml: pip install retry count" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "pip install retries config.toml: pip install retry count" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pip_install_retries", + "output": "pip install retries config.toml: pip install retry count" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pip_install_retries", + "output": "pip install retries config.toml: pip install retry count" + }, + { + "prompt_type": "plain", + "instruction": ": What does pip_install_use_constraint do? : pip install use constraint config.toml: Whether to use DAI constraint file to help pip handle versions. pip can make mistakes and try to install updated packages for no reason." + }, + { + "prompt_type": "plain", + "instruction": ": Explain pip_install_use_constraint. : pip install use constraint config.toml: Whether to use DAI constraint file to help pip handle versions. 
pip can make mistakes and try to install updated packages for no reason." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip_install_use_constraint", + "output": "pip install use constraint config.toml: Whether to use DAI constraint file to help pip handle versions. pip can make mistakes and try to install updated packages for no reason." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip_install_use_constraint", + "output": "pip install use constraint config.toml: Whether to use DAI constraint file to help pip handle versions. pip can make mistakes and try to install updated packages for no reason." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip install use constraint", + "output": "pip install use constraint config.toml: Whether to use DAI constraint file to help pip handle versions. pip can make mistakes and try to install updated packages for no reason." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "pip install use constraint config.toml: Whether to use DAI constraint file to help pip handle versions. pip can make mistakes and try to install updated packages for no reason." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pip_install_use_constraint", + "output": "pip install use constraint config.toml: Whether to use DAI constraint file to help pip handle versions. pip can make mistakes and try to install updated packages for no reason." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pip_install_use_constraint", + "output": "pip install use constraint config.toml: Whether to use DAI constraint file to help pip handle versions. pip can make mistakes and try to install updated packages for no reason." + }, + { + "prompt_type": "plain", + "instruction": ": What does pip_install_options do? : pip install options config.toml: pip install options: string of list of other options, e.g. ['--proxy', 'http://user:password@proxyserver:port']" + }, + { + "prompt_type": "plain", + "instruction": ": Explain pip_install_options. : pip install options config.toml: pip install options: string of list of other options, e.g. ['--proxy', 'http://user:password@proxyserver:port']" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip_install_options", + "output": "pip install options config.toml: pip install options: string of list of other options, e.g. ['--proxy', 'http://user:password@proxyserver:port']" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip_install_options", + "output": "pip install options config.toml: pip install options: string of list of other options, e.g. ['--proxy', 'http://user:password@proxyserver:port']" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pip install options", + "output": "pip install options config.toml: pip install options: string of list of other options, e.g. ['--proxy', 'http://user:password@proxyserver:port']" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "pip install options config.toml: pip install options: string of list of other options, e.g. 
['--proxy', 'http://user:password@proxyserver:port']" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pip_install_options", + "output": "pip install options config.toml: pip install options: string of list of other options, e.g. ['--proxy', 'http://user:password@proxyserver:port']" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pip_install_options", + "output": "pip install options config.toml: pip install options: string of list of other options, e.g. ['--proxy', 'http://user:password@proxyserver:port']" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_basic_acceptance_tests do? : enable basic acceptance tests config.toml: Whether to enable basic acceptance testing. Tests if can pickle the state, etc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_basic_acceptance_tests. : enable basic acceptance tests config.toml: Whether to enable basic acceptance testing. Tests if can pickle the state, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_basic_acceptance_tests", + "output": "enable basic acceptance tests config.toml: Whether to enable basic acceptance testing. Tests if can pickle the state, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_basic_acceptance_tests", + "output": "enable basic acceptance tests config.toml: Whether to enable basic acceptance testing. Tests if can pickle the state, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable basic acceptance tests", + "output": "enable basic acceptance tests config.toml: Whether to enable basic acceptance testing. Tests if can pickle the state, etc." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable basic acceptance tests config.toml: Whether to enable basic acceptance testing. Tests if can pickle the state, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_basic_acceptance_tests", + "output": "enable basic acceptance tests config.toml: Whether to enable basic acceptance testing. Tests if can pickle the state, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_basic_acceptance_tests", + "output": "enable basic acceptance tests config.toml: Whether to enable basic acceptance testing. Tests if can pickle the state, etc." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_acceptance_tests do? : enable acceptance tests config.toml: Whether acceptance tests should run for custom genes / models / scorers / etc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_acceptance_tests. : enable acceptance tests config.toml: Whether acceptance tests should run for custom genes / models / scorers / etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_acceptance_tests", + "output": "enable acceptance tests config.toml: Whether acceptance tests should run for custom genes / models / scorers / etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_acceptance_tests", + "output": "enable acceptance tests config.toml: Whether acceptance tests should run for custom genes / models / scorers / etc." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable acceptance tests", + "output": "enable acceptance tests config.toml: Whether acceptance tests should run for custom genes / models / scorers / etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable acceptance tests config.toml: Whether acceptance tests should run for custom genes / models / scorers / etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_acceptance_tests", + "output": "enable acceptance tests config.toml: Whether acceptance tests should run for custom genes / models / scorers / etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_acceptance_tests", + "output": "enable acceptance tests config.toml: Whether acceptance tests should run for custom genes / models / scorers / etc." + }, + { + "prompt_type": "plain", + "instruction": ": What does skip_disabled_recipes do? : skip disabled recipes config.toml: Whether to skip disabled recipes (True) or fail and show GUI message (False)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain skip_disabled_recipes. : skip disabled recipes config.toml: Whether to skip disabled recipes (True) or fail and show GUI message (False)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip_disabled_recipes", + "output": "skip disabled recipes config.toml: Whether to skip disabled recipes (True) or fail and show GUI message (False)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip_disabled_recipes", + "output": "skip disabled recipes config.toml: Whether to skip disabled recipes (True) or fail and show GUI message (False)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "skip disabled recipes", + "output": "skip disabled recipes config.toml: Whether to skip disabled recipes (True) or fail and show GUI message (False)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "skip disabled recipes config.toml: Whether to skip disabled recipes (True) or fail and show GUI message (False)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting skip_disabled_recipes", + "output": "skip disabled recipes config.toml: Whether to skip disabled recipes (True) or fail and show GUI message (False)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting skip_disabled_recipes", + "output": "skip disabled recipes config.toml: Whether to skip disabled recipes (True) or fail and show GUI message (False)." + }, + { + "prompt_type": "plain", + "instruction": ": What does acceptance_test_timeout do? : acceptance test timeout config.toml: Minutes to wait until a recipe's acceptance testing is aborted. A recipe is rejected if acceptancetesting is enabled and times out.One may also set timeout for a specific recipe by setting the class's staticmethod function calledacceptance_test_timeout to return number of minutes to wait until timeout doing acceptance testing.This timeout does not include the time to install required packages. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain acceptance_test_timeout. 
: acceptance test timeout config.toml: Minutes to wait until a recipe's acceptance testing is aborted. A recipe is rejected if acceptance testing is enabled and times out. One may also set timeout for a specific recipe by setting the class's staticmethod function called acceptance_test_timeout to return number of minutes to wait until timeout doing acceptance testing. This timeout does not include the time to install required packages. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Timeout in minutes for testing acceptance of each recipe: . : Set the acceptance test timeout config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "acceptance_test_timeout", + "output": "acceptance test timeout config.toml: Minutes to wait until a recipe's acceptance testing is aborted. A recipe is rejected if acceptance testing is enabled and times out. One may also set timeout for a specific recipe by setting the class's staticmethod function called acceptance_test_timeout to return number of minutes to wait until timeout doing acceptance testing. This timeout does not include the time to install required packages. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "acceptance_test_timeout", + "output": "acceptance test timeout config.toml: Timeout in minutes for testing acceptance of each recipe: Minutes to wait until a recipe's acceptance testing is aborted. A recipe is rejected if acceptance testing is enabled and times out. One may also set timeout for a specific recipe by setting the class's staticmethod function called acceptance_test_timeout to return number of minutes to wait until timeout doing acceptance testing. This timeout does not include the time to install required packages. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "acceptance test timeout", + "output": "acceptance test timeout config.toml: Timeout in minutes for testing acceptance of each recipe: Minutes to wait until a recipe's acceptance testing is aborted. A recipe is rejected if acceptancetesting is enabled and times out.One may also set timeout for a specific recipe by setting the class's staticmethod function calledacceptance_test_timeout to return number of minutes to wait until timeout doing acceptance testing.This timeout does not include the time to install required packages. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Timeout in minutes for testing acceptance of each recipe: ", + "output": "acceptance test timeout config.toml: Timeout in minutes for testing acceptance of each recipe: Minutes to wait until a recipe's acceptance testing is aborted. A recipe is rejected if acceptancetesting is enabled and times out.One may also set timeout for a specific recipe by setting the class's staticmethod function calledacceptance_test_timeout to return number of minutes to wait until timeout doing acceptance testing.This timeout does not include the time to install required packages. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting acceptance_test_timeout", + "output": "acceptance test timeout config.toml: Minutes to wait until a recipe's acceptance testing is aborted. A recipe is rejected if acceptancetesting is enabled and times out.One may also set timeout for a specific recipe by setting the class's staticmethod function calledacceptance_test_timeout to return number of minutes to wait until timeout doing acceptance testing.This timeout does not include the time to install required packages. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting acceptance_test_timeout", + "output": "acceptance test timeout config.toml: Timeout in minutes for testing acceptance of each recipe: Minutes to wait until a recipe's acceptance testing is aborted. A recipe is rejected if acceptancetesting is enabled and times out.One may also set timeout for a specific recipe by setting the class's staticmethod function calledacceptance_test_timeout to return number of minutes to wait until timeout doing acceptance testing.This timeout does not include the time to install required packages. " + }, + { + "prompt_type": "plain", + "instruction": ": What does contrib_reload_and_recheck_server_start do? : contrib reload and recheck server start config.toml: Whether to re-check recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). If any inconsistency develops, the bad recipe will be removed during re-doing acceptance testing. This process can make start-up take alot longer for many recipes, but in LTS releases the risk of recipes becoming out of date is low. If set to false, will disable acceptance re-testing during sever start but note that previews or experiments may fail if those inconsistent recipes are used. Such inconsistencies can occur when API changes for recipes or more aggressive acceptance tests are performed. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain contrib_reload_and_recheck_server_start. : contrib reload and recheck server start config.toml: Whether to re-check recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). If any inconsistency develops, the bad recipe will be removed during re-doing acceptance testing. This process can make start-up take alot longer for many recipes, but in LTS releases the risk of recipes becoming out of date is low. 
If set to false, will disable acceptance re-testing during sever start but note that previews or experiments may fail if those inconsistent recipes are used. Such inconsistencies can occur when API changes for recipes or more aggressive acceptance tests are performed. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib_reload_and_recheck_server_start", + "output": "contrib reload and recheck server start config.toml: Whether to re-check recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). If any inconsistency develops, the bad recipe will be removed during re-doing acceptance testing. This process can make start-up take alot longer for many recipes, but in LTS releases the risk of recipes becoming out of date is low. If set to false, will disable acceptance re-testing during sever start but note that previews or experiments may fail if those inconsistent recipes are used. Such inconsistencies can occur when API changes for recipes or more aggressive acceptance tests are performed. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib_reload_and_recheck_server_start", + "output": "contrib reload and recheck server start config.toml: Whether to re-check recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). If any inconsistency develops, the bad recipe will be removed during re-doing acceptance testing. This process can make start-up take alot longer for many recipes, but in LTS releases the risk of recipes becoming out of date is low. If set to false, will disable acceptance re-testing during sever start but note that previews or experiments may fail if those inconsistent recipes are used. 
Such inconsistencies can occur when API changes for recipes or more aggressive acceptance tests are performed. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib reload and recheck server start", + "output": "contrib reload and recheck server start config.toml: Whether to re-check recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). If any inconsistency develops, the bad recipe will be removed during re-doing acceptance testing. This process can make start-up take alot longer for many recipes, but in LTS releases the risk of recipes becoming out of date is low. If set to false, will disable acceptance re-testing during sever start but note that previews or experiments may fail if those inconsistent recipes are used. Such inconsistencies can occur when API changes for recipes or more aggressive acceptance tests are performed. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "contrib reload and recheck server start config.toml: Whether to re-check recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). If any inconsistency develops, the bad recipe will be removed during re-doing acceptance testing. This process can make start-up take alot longer for many recipes, but in LTS releases the risk of recipes becoming out of date is low. If set to false, will disable acceptance re-testing during sever start but note that previews or experiments may fail if those inconsistent recipes are used. Such inconsistencies can occur when API changes for recipes or more aggressive acceptance tests are performed. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting contrib_reload_and_recheck_server_start", + "output": "contrib reload and recheck server start config.toml: Whether to re-check recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). If any inconsistency develops, the bad recipe will be removed during re-doing acceptance testing. This process can make start-up take alot longer for many recipes, but in LTS releases the risk of recipes becoming out of date is low. If set to false, will disable acceptance re-testing during sever start but note that previews or experiments may fail if those inconsistent recipes are used. Such inconsistencies can occur when API changes for recipes or more aggressive acceptance tests are performed. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting contrib_reload_and_recheck_server_start", + "output": "contrib reload and recheck server start config.toml: Whether to re-check recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). If any inconsistency develops, the bad recipe will be removed during re-doing acceptance testing. This process can make start-up take alot longer for many recipes, but in LTS releases the risk of recipes becoming out of date is low. If set to false, will disable acceptance re-testing during sever start but note that previews or experiments may fail if those inconsistent recipes are used. Such inconsistencies can occur when API changes for recipes or more aggressive acceptance tests are performed. " + }, + { + "prompt_type": "plain", + "instruction": ": What does contrib_install_packages_server_start do? 
: contrib install packages server start config.toml: Whether to at least install packages required for recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). Important to keep True so any later use of recipes (that have global packages installed) will work. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain contrib_install_packages_server_start. : contrib install packages server start config.toml: Whether to at least install packages required for recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). Important to keep True so any later use of recipes (that have global packages installed) will work. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib_install_packages_server_start", + "output": "contrib install packages server start config.toml: Whether to at least install packages required for recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). Important to keep True so any later use of recipes (that have global packages installed) will work. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib_install_packages_server_start", + "output": "contrib install packages server start config.toml: Whether to at least install packages required for recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). Important to keep True so any later use of recipes (that have global packages installed) will work. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib install packages server start", + "output": "contrib install packages server start config.toml: Whether to at least install packages required for recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). Important to keep True so any later use of recipes (that have global packages installed) will work. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "contrib install packages server start config.toml: Whether to at least install packages required for recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). Important to keep True so any later use of recipes (that have global packages installed) will work. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting contrib_install_packages_server_start", + "output": "contrib install packages server start config.toml: Whether to at least install packages required for recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). Important to keep True so any later use of recipes (that have global packages installed) will work. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting contrib_install_packages_server_start", + "output": "contrib install packages server start config.toml: Whether to at least install packages required for recipes during server startup (if per_user_directories == false) or during user login (if per_user_directories == true). Important to keep True so any later use of recipes (that have global packages installed) will work. 
" + }, + { + "prompt_type": "plain", + "instruction": ": What does contrib_reload_and_recheck_worker_tasks do? : contrib reload and recheck worker tasks config.toml: Whether to re-check recipes after uploaded from main server to worker in multinode. Expensive for every task that has recipes to do this." + }, + { + "prompt_type": "plain", + "instruction": ": Explain contrib_reload_and_recheck_worker_tasks. : contrib reload and recheck worker tasks config.toml: Whether to re-check recipes after uploaded from main server to worker in multinode. Expensive for every task that has recipes to do this." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib_reload_and_recheck_worker_tasks", + "output": "contrib reload and recheck worker tasks config.toml: Whether to re-check recipes after uploaded from main server to worker in multinode. Expensive for every task that has recipes to do this." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib_reload_and_recheck_worker_tasks", + "output": "contrib reload and recheck worker tasks config.toml: Whether to re-check recipes after uploaded from main server to worker in multinode. Expensive for every task that has recipes to do this." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "contrib reload and recheck worker tasks", + "output": "contrib reload and recheck worker tasks config.toml: Whether to re-check recipes after uploaded from main server to worker in multinode. Expensive for every task that has recipes to do this." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "contrib reload and recheck worker tasks config.toml: Whether to re-check recipes after uploaded from main server to worker in multinode. Expensive for every task that has recipes to do this." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting contrib_reload_and_recheck_worker_tasks", + "output": "contrib reload and recheck worker tasks config.toml: Whether to re-check recipes after uploaded from main server to worker in multinode. Expensive for every task that has recipes to do this." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting contrib_reload_and_recheck_worker_tasks", + "output": "contrib reload and recheck worker tasks config.toml: Whether to re-check recipes after uploaded from main server to worker in multinode. Expensive for every task that has recipes to do this." + }, + { + "prompt_type": "plain", + "instruction": ": What does data_recipe_isolate do? : data recipe isolate config.toml: Whether to isolate (in fork) data recipe in case imports change needs across.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain data_recipe_isolate. 
: data recipe isolate config.toml: Whether to isolate (in fork) data recipe in case imports change needs across.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_recipe_isolate", + "output": "data recipe isolate config.toml: Whether to isolate (in fork) data recipe in case imports change needs across.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_recipe_isolate", + "output": "data recipe isolate config.toml: Whether to isolate (in fork) data recipe in case imports change needs across.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data recipe isolate", + "output": "data recipe isolate config.toml: Whether to isolate (in fork) data recipe in case imports change needs across.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to isolate (in fork) data recipe in case imports change needs across.: ", + "output": "data recipe isolate config.toml: Whether to isolate (in fork) data recipe in case imports change needs across.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting data_recipe_isolate", + "output": "data recipe isolate config.toml: Whether to isolate (in fork) data recipe in case imports change needs across.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting data_recipe_isolate", + "output": "data recipe isolate config.toml: Whether to isolate (in fork) data recipe in case imports change needs across.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does server_recipe_url do? 
: server recipe url config.toml: Space-separated string list of URLs for recipes that are loaded at user login time" + }, + { + "prompt_type": "plain", + "instruction": ": Explain server_recipe_url. : server recipe url config.toml: Space-separated string list of URLs for recipes that are loaded at user login time" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "server_recipe_url", + "output": "server recipe url config.toml: Space-separated string list of URLs for recipes that are loaded at user login time" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "server_recipe_url", + "output": "server recipe url config.toml: Space-separated string list of URLs for recipes that are loaded at user login time" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "server recipe url", + "output": "server recipe url config.toml: Space-separated string list of URLs for recipes that are loaded at user login time" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "server recipe url config.toml: Space-separated string list of URLs for recipes that are loaded at user login time" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting server_recipe_url", + "output": "server recipe url config.toml: Space-separated string list of URLs for recipes that are loaded at user login time" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting server_recipe_url", + "output": "server recipe url config.toml: Space-separated string list of URLs for recipes that are loaded at user login time" + }, + { + "prompt_type": "plain", + "instruction": ": What does recipe_activation do? 
: recipe activation config.toml: List of recipes (per dict key by type) that are applicable for given experiment. This is especially relevantfor situations such as new `experiment with same params` where the user should be able touse the same recipe versions as the parent experiment if he/she wishes to. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain recipe_activation. : recipe activation config.toml: List of recipes (per dict key by type) that are applicable for given experiment. This is especially relevantfor situations such as new `experiment with same params` where the user should be able touse the same recipe versions as the parent experiment if he/she wishes to. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Recipe Activation List: . : Set the recipe activation config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "recipe_activation", + "output": "recipe activation config.toml: List of recipes (per dict key by type) that are applicable for given experiment. This is especially relevantfor situations such as new `experiment with same params` where the user should be able touse the same recipe versions as the parent experiment if he/she wishes to. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "recipe_activation", + "output": "recipe activation config.toml: Recipe Activation List: List of recipes (per dict key by type) that are applicable for given experiment. This is especially relevantfor situations such as new `experiment with same params` where the user should be able touse the same recipe versions as the parent experiment if he/she wishes to. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "recipe activation", + "output": "recipe activation config.toml: Recipe Activation List: List of recipes (per dict key by type) that are applicable for given experiment. This is especially relevantfor situations such as new `experiment with same params` where the user should be able touse the same recipe versions as the parent experiment if he/she wishes to. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Recipe Activation List: ", + "output": "recipe activation config.toml: Recipe Activation List: List of recipes (per dict key by type) that are applicable for given experiment. This is especially relevantfor situations such as new `experiment with same params` where the user should be able touse the same recipe versions as the parent experiment if he/she wishes to. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting recipe_activation", + "output": "recipe activation config.toml: List of recipes (per dict key by type) that are applicable for given experiment. This is especially relevantfor situations such as new `experiment with same params` where the user should be able touse the same recipe versions as the parent experiment if he/she wishes to. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting recipe_activation", + "output": "recipe activation config.toml: Recipe Activation List: List of recipes (per dict key by type) that are applicable for given experiment. This is especially relevantfor situations such as new `experiment with same params` where the user should be able touse the same recipe versions as the parent experiment if he/she wishes to. " + }, + { + "prompt_type": "plain", + "instruction": ": What does enabled_file_systems do? 
: enabled file systems config.toml: File System Support upload : standard upload feature file : local file system/server file system hdfs : Hadoop file system, remember to configure the HDFS config folder path and keytab below dtap : Blue Data Tap file system, remember to configure the DTap section below s3 : Amazon S3, optionally configure secret and access key below gcs : Google Cloud Storage, remember to configure gcs_path_to_service_account_json below gbq : Google Big Query, remember to configure gcs_path_to_service_account_json below minio : Minio Cloud Storage, remember to configure secret and access key below snow : Snowflake Data Warehouse, remember to configure Snowflake credentials below (account name, username, password) kdb : KDB+ Time Series Database, remember to configure KDB credentials below (hostname and port, optionally: username, password, classpath, and jvm_args) azrbs : Azure Blob Storage, remember to configure Azure credentials below (account name, account key) jdbc: JDBC Connector, remember to configure JDBC below. (jdbc_app_configs) hive: Hive Connector, remember to configure Hive below. (hive_app_configs) recipe_file: Custom recipe file upload recipe_url: Custom recipe upload via url h2o_drive: H2O Drive, remember to configure `h2o_drive_endpoint_url` below feature_store: Feature Store, remember to configure feature_store_endpoint_url below " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enabled_file_systems. 
: enabled file systems config.toml: File System Support upload : standard upload feature file : local file system/server file system hdfs : Hadoop file system, remember to configure the HDFS config folder path and keytab below dtap : Blue Data Tap file system, remember to configure the DTap section below s3 : Amazon S3, optionally configure secret and access key below gcs : Google Cloud Storage, remember to configure gcs_path_to_service_account_json below gbq : Google Big Query, remember to configure gcs_path_to_service_account_json below minio : Minio Cloud Storage, remember to configure secret and access key below snow : Snowflake Data Warehouse, remember to configure Snowflake credentials below (account name, username, password) kdb : KDB+ Time Series Database, remember to configure KDB credentials below (hostname and port, optionally: username, password, classpath, and jvm_args) azrbs : Azure Blob Storage, remember to configure Azure credentials below (account name, account key) jdbc: JDBC Connector, remember to configure JDBC below. (jdbc_app_configs) hive: Hive Connector, remember to configure Hive below. 
(hive_app_configs) recipe_file: Custom recipe file upload recipe_url: Custom recipe upload via url h2o_drive: H2O Drive, remember to configure `h2o_drive_endpoint_url` below feature_store: Feature Store, remember to configure feature_store_endpoint_url below " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enabled_file_systems", + "output": "enabled file systems config.toml: File System Support upload : standard upload feature file : local file system/server file system hdfs : Hadoop file system, remember to configure the HDFS config folder path and keytab below dtap : Blue Data Tap file system, remember to configure the DTap section below s3 : Amazon S3, optionally configure secret and access key below gcs : Google Cloud Storage, remember to configure gcs_path_to_service_account_json below gbq : Google Big Query, remember to configure gcs_path_to_service_account_json below minio : Minio Cloud Storage, remember to configure secret and access key below snow : Snowflake Data Warehouse, remember to configure Snowflake credentials below (account name, username, password) kdb : KDB+ Time Series Database, remember to configure KDB credentials below (hostname and port, optionally: username, password, classpath, and jvm_args) azrbs : Azure Blob Storage, remember to configure Azure credentials below (account name, account key) jdbc: JDBC Connector, remember to configure JDBC below. (jdbc_app_configs) hive: Hive Connector, remember to configure Hive below. 
(hive_app_configs) recipe_file: Custom recipe file upload recipe_url: Custom recipe upload via url h2o_drive: H2O Drive, remember to configure `h2o_drive_endpoint_url` below feature_store: Feature Store, remember to configure feature_store_endpoint_url below " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enabled_file_systems", + "output": "enabled file systems config.toml: File System Support upload : standard upload feature file : local file system/server file system hdfs : Hadoop file system, remember to configure the HDFS config folder path and keytab below dtap : Blue Data Tap file system, remember to configure the DTap section below s3 : Amazon S3, optionally configure secret and access key below gcs : Google Cloud Storage, remember to configure gcs_path_to_service_account_json below gbq : Google Big Query, remember to configure gcs_path_to_service_account_json below minio : Minio Cloud Storage, remember to configure secret and access key below snow : Snowflake Data Warehouse, remember to configure Snowflake credentials below (account name, username, password) kdb : KDB+ Time Series Database, remember to configure KDB credentials below (hostname and port, optionally: username, password, classpath, and jvm_args) azrbs : Azure Blob Storage, remember to configure Azure credentials below (account name, account key) jdbc: JDBC Connector, remember to configure JDBC below. (jdbc_app_configs) hive: Hive Connector, remember to configure Hive below. 
(hive_app_configs) recipe_file: Custom recipe file upload recipe_url: Custom recipe upload via url h2o_drive: H2O Drive, remember to configure `h2o_drive_endpoint_url` below feature_store: Feature Store, remember to configure feature_store_endpoint_url below " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enabled file systems", + "output": "enabled file systems config.toml: File System Support upload : standard upload feature file : local file system/server file system hdfs : Hadoop file system, remember to configure the HDFS config folder path and keytab below dtap : Blue Data Tap file system, remember to configure the DTap section below s3 : Amazon S3, optionally configure secret and access key below gcs : Google Cloud Storage, remember to configure gcs_path_to_service_account_json below gbq : Google Big Query, remember to configure gcs_path_to_service_account_json below minio : Minio Cloud Storage, remember to configure secret and access key below snow : Snowflake Data Warehouse, remember to configure Snowflake credentials below (account name, username, password) kdb : KDB+ Time Series Database, remember to configure KDB credentials below (hostname and port, optionally: username, password, classpath, and jvm_args) azrbs : Azure Blob Storage, remember to configure Azure credentials below (account name, account key) jdbc: JDBC Connector, remember to configure JDBC below. (jdbc_app_configs) hive: Hive Connector, remember to configure Hive below. 
(hive_app_configs) recipe_file: Custom recipe file upload recipe_url: Custom recipe upload via url h2o_drive: H2O Drive, remember to configure `h2o_drive_endpoint_url` below feature_store: Feature Store, remember to configure feature_store_endpoint_url below " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enabled file systems config.toml: File System Support upload : standard upload feature file : local file system/server file system hdfs : Hadoop file system, remember to configure the HDFS config folder path and keytab below dtap : Blue Data Tap file system, remember to configure the DTap section below s3 : Amazon S3, optionally configure secret and access key below gcs : Google Cloud Storage, remember to configure gcs_path_to_service_account_json below gbq : Google Big Query, remember to configure gcs_path_to_service_account_json below minio : Minio Cloud Storage, remember to configure secret and access key below snow : Snowflake Data Warehouse, remember to configure Snowflake credentials below (account name, username, password) kdb : KDB+ Time Series Database, remember to configure KDB credentials below (hostname and port, optionally: username, password, classpath, and jvm_args) azrbs : Azure Blob Storage, remember to configure Azure credentials below (account name, account key) jdbc: JDBC Connector, remember to configure JDBC below. (jdbc_app_configs) hive: Hive Connector, remember to configure Hive below. 
(hive_app_configs) recipe_file: Custom recipe file upload recipe_url: Custom recipe upload via url h2o_drive: H2O Drive, remember to configure `h2o_drive_endpoint_url` below feature_store: Feature Store, remember to configure feature_store_endpoint_url below " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enabled_file_systems", + "output": "enabled file systems config.toml: File System Support upload : standard upload feature file : local file system/server file system hdfs : Hadoop file system, remember to configure the HDFS config folder path and keytab below dtap : Blue Data Tap file system, remember to configure the DTap section below s3 : Amazon S3, optionally configure secret and access key below gcs : Google Cloud Storage, remember to configure gcs_path_to_service_account_json below gbq : Google Big Query, remember to configure gcs_path_to_service_account_json below minio : Minio Cloud Storage, remember to configure secret and access key below snow : Snowflake Data Warehouse, remember to configure Snowflake credentials below (account name, username, password) kdb : KDB+ Time Series Database, remember to configure KDB credentials below (hostname and port, optionally: username, password, classpath, and jvm_args) azrbs : Azure Blob Storage, remember to configure Azure credentials below (account name, account key) jdbc: JDBC Connector, remember to configure JDBC below. (jdbc_app_configs) hive: Hive Connector, remember to configure Hive below. 
(hive_app_configs) recipe_file: Custom recipe file upload recipe_url: Custom recipe upload via url h2o_drive: H2O Drive, remember to configure `h2o_drive_endpoint_url` below feature_store: Feature Store, remember to configure feature_store_endpoint_url below " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enabled_file_systems", + "output": "enabled file systems config.toml: File System Support upload : standard upload feature file : local file system/server file system hdfs : Hadoop file system, remember to configure the HDFS config folder path and keytab below dtap : Blue Data Tap file system, remember to configure the DTap section below s3 : Amazon S3, optionally configure secret and access key below gcs : Google Cloud Storage, remember to configure gcs_path_to_service_account_json below gbq : Google Big Query, remember to configure gcs_path_to_service_account_json below minio : Minio Cloud Storage, remember to configure secret and access key below snow : Snowflake Data Warehouse, remember to configure Snowflake credentials below (account name, username, password) kdb : KDB+ Time Series Database, remember to configure KDB credentials below (hostname and port, optionally: username, password, classpath, and jvm_args) azrbs : Azure Blob Storage, remember to configure Azure credentials below (account name, account key) jdbc: JDBC Connector, remember to configure JDBC below. (jdbc_app_configs) hive: Hive Connector, remember to configure Hive below. (hive_app_configs) recipe_file: Custom recipe file upload recipe_url: Custom recipe upload via url h2o_drive: H2O Drive, remember to configure `h2o_drive_endpoint_url` below feature_store: Feature Store, remember to configure feature_store_endpoint_url below " + }, + { + "prompt_type": "plain", + "instruction": ": What does file_hide_data_directory do? 
: file hide data directory config.toml: The option disable access to DAI data_directory from file browser" + }, + { + "prompt_type": "plain", + "instruction": ": Explain file_hide_data_directory. : file hide data directory config.toml: The option disable access to DAI data_directory from file browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "file_hide_data_directory", + "output": "file hide data directory config.toml: The option disable access to DAI data_directory from file browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "file_hide_data_directory", + "output": "file hide data directory config.toml: The option disable access to DAI data_directory from file browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "file hide data directory", + "output": "file hide data directory config.toml: The option disable access to DAI data_directory from file browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "file hide data directory config.toml: The option disable access to DAI data_directory from file browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting file_hide_data_directory", + "output": "file hide data directory config.toml: The option disable access to DAI data_directory from file browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting file_hide_data_directory", + "output": "file hide data directory config.toml: The option disable access to DAI data_directory from file browser" + }, + { + "prompt_type": "plain", + "instruction": ": What does file_path_filtering_enabled do? 
: file path filtering enabled config.toml: Enable usage of path filters" + }, + { + "prompt_type": "plain", + "instruction": ": Explain file_path_filtering_enabled. : file path filtering enabled config.toml: Enable usage of path filters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "file_path_filtering_enabled", + "output": "file path filtering enabled config.toml: Enable usage of path filters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "file_path_filtering_enabled", + "output": "file path filtering enabled config.toml: Enable usage of path filters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "file path filtering enabled", + "output": "file path filtering enabled config.toml: Enable usage of path filters" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "file path filtering enabled config.toml: Enable usage of path filters" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting file_path_filtering_enabled", + "output": "file path filtering enabled config.toml: Enable usage of path filters" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting file_path_filtering_enabled", + "output": "file path filtering enabled config.toml: Enable usage of path filters" + }, + { + "prompt_type": "plain", + "instruction": ": What does file_path_filter_include do? : file path filter include config.toml: List of absolute path prefixes to restrict access to in file system browser. 
First add the following environment variable to your command line to enable this feature: file_path_filtering_enabled=true This feature can be used in the following ways (using specific path or using logged user's directory): file_path_filter_include=\"['/data/stage']\" file_path_filter_include=\"['/data/stage','/data/prod']\" file_path_filter_include=/home/{{DAI_USERNAME}}/ file_path_filter_include=\"['/home/{{DAI_USERNAME}}/','/data/stage','/data/prod']\" " + }, + { + "prompt_type": "plain", + "instruction": ": Explain file_path_filter_include. : file path filter include config.toml: List of absolute path prefixes to restrict access to in file system browser. First add the following environment variable to your command line to enable this feature: file_path_filtering_enabled=true This feature can be used in the following ways (using specific path or using logged user's directory): file_path_filter_include=\"['/data/stage']\" file_path_filter_include=\"['/data/stage','/data/prod']\" file_path_filter_include=/home/{{DAI_USERNAME}}/ file_path_filter_include=\"['/home/{{DAI_USERNAME}}/','/data/stage','/data/prod']\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "file_path_filter_include", + "output": "file path filter include config.toml: List of absolute path prefixes to restrict access to in file system browser. 
First add the following environment variable to your command line to enable this feature: file_path_filtering_enabled=true This feature can be used in the following ways (using specific path or using logged user's directory): file_path_filter_include=\"['/data/stage']\" file_path_filter_include=\"['/data/stage','/data/prod']\" file_path_filter_include=/home/{{DAI_USERNAME}}/ file_path_filter_include=\"['/home/{{DAI_USERNAME}}/','/data/stage','/data/prod']\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "file_path_filter_include", + "output": "file path filter include config.toml: List of absolute path prefixes to restrict access to in file system browser. First add the following environment variable to your command line to enable this feature: file_path_filtering_enabled=true This feature can be used in the following ways (using specific path or using logged user's directory): file_path_filter_include=\"['/data/stage']\" file_path_filter_include=\"['/data/stage','/data/prod']\" file_path_filter_include=/home/{{DAI_USERNAME}}/ file_path_filter_include=\"['/home/{{DAI_USERNAME}}/','/data/stage','/data/prod']\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "file path filter include", + "output": "file path filter include config.toml: List of absolute path prefixes to restrict access to in file system browser. 
First add the following environment variable to your command line to enable this feature: file_path_filtering_enabled=true This feature can be used in the following ways (using specific path or using logged user's directory): file_path_filter_include=\"['/data/stage']\" file_path_filter_include=\"['/data/stage','/data/prod']\" file_path_filter_include=/home/{{DAI_USERNAME}}/ file_path_filter_include=\"['/home/{{DAI_USERNAME}}/','/data/stage','/data/prod']\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "file path filter include config.toml: List of absolute path prefixes to restrict access to in file system browser. First add the following environment variable to your command line to enable this feature: file_path_filtering_enabled=true This feature can be used in the following ways (using specific path or using logged user's directory): file_path_filter_include=\"['/data/stage']\" file_path_filter_include=\"['/data/stage','/data/prod']\" file_path_filter_include=/home/{{DAI_USERNAME}}/ file_path_filter_include=\"['/home/{{DAI_USERNAME}}/','/data/stage','/data/prod']\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting file_path_filter_include", + "output": "file path filter include config.toml: List of absolute path prefixes to restrict access to in file system browser. 
First add the following environment variable to your command line to enable this feature: file_path_filtering_enabled=true This feature can be used in the following ways (using specific path or using logged user's directory): file_path_filter_include=\"['/data/stage']\" file_path_filter_include=\"['/data/stage','/data/prod']\" file_path_filter_include=/home/{{DAI_USERNAME}}/ file_path_filter_include=\"['/home/{{DAI_USERNAME}}/','/data/stage','/data/prod']\" " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting file_path_filter_include", + "output": "file path filter include config.toml: List of absolute path prefixes to restrict access to in file system browser. First add the following environment variable to your command line to enable this feature: file_path_filtering_enabled=true This feature can be used in the following ways (using specific path or using logged user's directory): file_path_filter_include=\"['/data/stage']\" file_path_filter_include=\"['/data/stage','/data/prod']\" file_path_filter_include=/home/{{DAI_USERNAME}}/ file_path_filter_include=\"['/home/{{DAI_USERNAME}}/','/data/stage','/data/prod']\" " + }, + { + "prompt_type": "plain", + "instruction": ": What does hdfs_auth_type do? : hdfs auth type config.toml: (Required) HDFS connector Specify HDFS Auth Type, allowed options are: noauth : (default) No authentication needed principal : Authenticate with HDFS with a principal user (DEPRECTATED - use `keytab` auth type) keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. keytabimpersonation : Login with impersonation using a keytab" + }, + { + "prompt_type": "plain", + "instruction": ": Explain hdfs_auth_type. 
: hdfs auth type config.toml: (Required) HDFS connector Specify HDFS Auth Type, allowed options are: noauth : (default) No authentication needed principal : Authenticate with HDFS with a principal user (DEPRECTATED - use `keytab` auth type) keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. keytabimpersonation : Login with impersonation using a keytab" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_auth_type", + "output": "hdfs auth type config.toml: (Required) HDFS connector Specify HDFS Auth Type, allowed options are: noauth : (default) No authentication needed principal : Authenticate with HDFS with a principal user (DEPRECTATED - use `keytab` auth type) keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. keytabimpersonation : Login with impersonation using a keytab" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_auth_type", + "output": "hdfs auth type config.toml: (Required) HDFS connector Specify HDFS Auth Type, allowed options are: noauth : (default) No authentication needed principal : Authenticate with HDFS with a principal user (DEPRECTATED - use `keytab` auth type) keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. 
keytabimpersonation : Login with impersonation using a keytab" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs auth type", + "output": "hdfs auth type config.toml: (Required) HDFS connector Specify HDFS Auth Type, allowed options are: noauth : (default) No authentication needed principal : Authenticate with HDFS with a principal user (DEPRECTATED - use `keytab` auth type) keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. keytabimpersonation : Login with impersonation using a keytab" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hdfs auth type config.toml: (Required) HDFS connector Specify HDFS Auth Type, allowed options are: noauth : (default) No authentication needed principal : Authenticate with HDFS with a principal user (DEPRECTATED - use `keytab` auth type) keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. keytabimpersonation : Login with impersonation using a keytab" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hdfs_auth_type", + "output": "hdfs auth type config.toml: (Required) HDFS connector Specify HDFS Auth Type, allowed options are: noauth : (default) No authentication needed principal : Authenticate with HDFS with a principal user (DEPRECTATED - use `keytab` auth type) keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. 
keytabimpersonation : Login with impersonation using a keytab" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hdfs_auth_type", + "output": "hdfs auth type config.toml: (Required) HDFS connector Specify HDFS Auth Type, allowed options are: noauth : (default) No authentication needed principal : Authenticate with HDFS with a principal user (DEPRECTATED - use `keytab` auth type) keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. keytabimpersonation : Login with impersonation using a keytab" + }, + { + "prompt_type": "plain", + "instruction": ": What does hdfs_app_principal_user do? : hdfs app principal user config.toml: Kerberos app principal user. Required when hdfs_auth_type='keytab'; recommended otherwise." + }, + { + "prompt_type": "plain", + "instruction": ": Explain hdfs_app_principal_user. : hdfs app principal user config.toml: Kerberos app principal user. Required when hdfs_auth_type='keytab'; recommended otherwise." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_app_principal_user", + "output": "hdfs app principal user config.toml: Kerberos app principal user. Required when hdfs_auth_type='keytab'; recommended otherwise." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_app_principal_user", + "output": "hdfs app principal user config.toml: Kerberos app principal user. Required when hdfs_auth_type='keytab'; recommended otherwise." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs app principal user", + "output": "hdfs app principal user config.toml: Kerberos app principal user. Required when hdfs_auth_type='keytab'; recommended otherwise." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hdfs app principal user config.toml: Kerberos app principal user. Required when hdfs_auth_type='keytab'; recommended otherwise." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hdfs_app_principal_user", + "output": "hdfs app principal user config.toml: Kerberos app principal user. Required when hdfs_auth_type='keytab'; recommended otherwise." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hdfs_app_principal_user", + "output": "hdfs app principal user config.toml: Kerberos app principal user. Required when hdfs_auth_type='keytab'; recommended otherwise." + }, + { + "prompt_type": "plain", + "instruction": ": What does hdfs_app_login_user do? : hdfs app login user config.toml: Deprecated - Do Not Use, login user is taken from the user name from login" + }, + { + "prompt_type": "plain", + "instruction": ": Explain hdfs_app_login_user. 
: hdfs app login user config.toml: Deprecated - Do Not Use, login user is taken from the user name from login" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_app_login_user", + "output": "hdfs app login user config.toml: Deprecated - Do Not Use, login user is taken from the user name from login" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_app_login_user", + "output": "hdfs app login user config.toml: Deprecated - Do Not Use, login user is taken from the user name from login" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs app login user", + "output": "hdfs app login user config.toml: Deprecated - Do Not Use, login user is taken from the user name from login" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hdfs app login user config.toml: Deprecated - Do Not Use, login user is taken from the user name from login" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hdfs_app_login_user", + "output": "hdfs app login user config.toml: Deprecated - Do Not Use, login user is taken from the user name from login" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hdfs_app_login_user", + "output": "hdfs app login user config.toml: Deprecated - Do Not Use, login user is taken from the user name from login" + }, + { + "prompt_type": "plain", + "instruction": ": What does hdfs_app_jvm_args do? 
: hdfs app jvm args config.toml: JVM args for HDFS distributions, provide args seperate by space -Djava.security.krb5.conf=/krb5.conf -Dsun.security.krb5.debug=True -Dlog4j.configuration=file:///log4j.properties" + }, + { + "prompt_type": "plain", + "instruction": ": Explain hdfs_app_jvm_args. : hdfs app jvm args config.toml: JVM args for HDFS distributions, provide args seperate by space -Djava.security.krb5.conf=/krb5.conf -Dsun.security.krb5.debug=True -Dlog4j.configuration=file:///log4j.properties" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_app_jvm_args", + "output": "hdfs app jvm args config.toml: JVM args for HDFS distributions, provide args seperate by space -Djava.security.krb5.conf=/krb5.conf -Dsun.security.krb5.debug=True -Dlog4j.configuration=file:///log4j.properties" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_app_jvm_args", + "output": "hdfs app jvm args config.toml: JVM args for HDFS distributions, provide args seperate by space -Djava.security.krb5.conf=/krb5.conf -Dsun.security.krb5.debug=True -Dlog4j.configuration=file:///log4j.properties" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs app jvm args", + "output": "hdfs app jvm args config.toml: JVM args for HDFS distributions, provide args seperate by space -Djava.security.krb5.conf=/krb5.conf -Dsun.security.krb5.debug=True -Dlog4j.configuration=file:///log4j.properties" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hdfs app jvm args config.toml: JVM args for HDFS distributions, provide args seperate by space -Djava.security.krb5.conf=/krb5.conf -Dsun.security.krb5.debug=True -Dlog4j.configuration=file:///log4j.properties" + }, + { + "prompt_type": 
"human_bot", + "instruction": "Provide a short explanation of the expert setting hdfs_app_jvm_args", + "output": "hdfs app jvm args config.toml: JVM args for HDFS distributions, provide args seperate by space -Djava.security.krb5.conf=/krb5.conf -Dsun.security.krb5.debug=True -Dlog4j.configuration=file:///log4j.properties" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hdfs_app_jvm_args", + "output": "hdfs app jvm args config.toml: JVM args for HDFS distributions, provide args seperate by space -Djava.security.krb5.conf=/krb5.conf -Dsun.security.krb5.debug=True -Dlog4j.configuration=file:///log4j.properties" + }, + { + "prompt_type": "plain", + "instruction": ": What does hdfs_app_classpath do? : hdfs app classpath config.toml: hdfs class path" + }, + { + "prompt_type": "plain", + "instruction": ": Explain hdfs_app_classpath. : hdfs app classpath config.toml: hdfs class path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_app_classpath", + "output": "hdfs app classpath config.toml: hdfs class path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_app_classpath", + "output": "hdfs app classpath config.toml: hdfs class path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs app classpath", + "output": "hdfs app classpath config.toml: hdfs class path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hdfs app classpath config.toml: hdfs class path" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hdfs_app_classpath", + "output": "hdfs app classpath config.toml: hdfs class path" + }, + { + "prompt_type": "human_bot", 
+ "instruction": "Provide a detailed explanation of the expert setting hdfs_app_classpath", + "output": "hdfs app classpath config.toml: hdfs class path" + }, + { + "prompt_type": "plain", + "instruction": ": What does hdfs_app_supported_schemes do? : hdfs app supported schemes config.toml: List of supported DFS schemas. Ex. \"['hdfs://', 'maprfs://', 'swift://']\" Supported schemas list is used as an initial check to ensure valid input to connector " + }, + { + "prompt_type": "plain", + "instruction": ": Explain hdfs_app_supported_schemes. : hdfs app supported schemes config.toml: List of supported DFS schemas. Ex. \"['hdfs://', 'maprfs://', 'swift://']\" Supported schemas list is used as an initial check to ensure valid input to connector " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_app_supported_schemes", + "output": "hdfs app supported schemes config.toml: List of supported DFS schemas. Ex. \"['hdfs://', 'maprfs://', 'swift://']\" Supported schemas list is used as an initial check to ensure valid input to connector " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_app_supported_schemes", + "output": "hdfs app supported schemes config.toml: List of supported DFS schemas. Ex. \"['hdfs://', 'maprfs://', 'swift://']\" Supported schemas list is used as an initial check to ensure valid input to connector " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs app supported schemes", + "output": "hdfs app supported schemes config.toml: List of supported DFS schemas. Ex. 
\"['hdfs://', 'maprfs://', 'swift://']\" Supported schemas list is used as an initial check to ensure valid input to connector " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hdfs app supported schemes config.toml: List of supported DFS schemas. Ex. \"['hdfs://', 'maprfs://', 'swift://']\" Supported schemas list is used as an initial check to ensure valid input to connector " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hdfs_app_supported_schemes", + "output": "hdfs app supported schemes config.toml: List of supported DFS schemas. Ex. \"['hdfs://', 'maprfs://', 'swift://']\" Supported schemas list is used as an initial check to ensure valid input to connector " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hdfs_app_supported_schemes", + "output": "hdfs app supported schemes config.toml: List of supported DFS schemas. Ex. \"['hdfs://', 'maprfs://', 'swift://']\" Supported schemas list is used as an initial check to ensure valid input to connector " + }, + { + "prompt_type": "plain", + "instruction": ": What does hdfs_max_files_listed do? : hdfs max files listed config.toml: Maximum number of files viewable in connector ui. Set to larger number to view more files" + }, + { + "prompt_type": "plain", + "instruction": ": Explain hdfs_max_files_listed. : hdfs max files listed config.toml: Maximum number of files viewable in connector ui. Set to larger number to view more files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_max_files_listed", + "output": "hdfs max files listed config.toml: Maximum number of files viewable in connector ui. 
Set to larger number to view more files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_max_files_listed", + "output": "hdfs max files listed config.toml: Maximum number of files viewable in connector ui. Set to larger number to view more files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs max files listed", + "output": "hdfs max files listed config.toml: Maximum number of files viewable in connector ui. Set to larger number to view more files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hdfs max files listed config.toml: Maximum number of files viewable in connector ui. Set to larger number to view more files" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hdfs_max_files_listed", + "output": "hdfs max files listed config.toml: Maximum number of files viewable in connector ui. Set to larger number to view more files" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hdfs_max_files_listed", + "output": "hdfs max files listed config.toml: Maximum number of files viewable in connector ui. Set to larger number to view more files" + }, + { + "prompt_type": "plain", + "instruction": ": What does hdfs_init_path do? : hdfs init path config.toml: Starting HDFS path displayed in UI HDFS browser" + }, + { + "prompt_type": "plain", + "instruction": ": Explain hdfs_init_path. 
: hdfs init path config.toml: Starting HDFS path displayed in UI HDFS browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_init_path", + "output": "hdfs init path config.toml: Starting HDFS path displayed in UI HDFS browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_init_path", + "output": "hdfs init path config.toml: Starting HDFS path displayed in UI HDFS browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs init path", + "output": "hdfs init path config.toml: Starting HDFS path displayed in UI HDFS browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hdfs init path config.toml: Starting HDFS path displayed in UI HDFS browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hdfs_init_path", + "output": "hdfs init path config.toml: Starting HDFS path displayed in UI HDFS browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hdfs_init_path", + "output": "hdfs init path config.toml: Starting HDFS path displayed in UI HDFS browser" + }, + { + "prompt_type": "plain", + "instruction": ": What does hdfs_upload_init_path do? : hdfs upload init path config.toml: Starting HDFS path for the artifacts upload operations" + }, + { + "prompt_type": "plain", + "instruction": ": Explain hdfs_upload_init_path. 
: hdfs upload init path config.toml: Starting HDFS path for the artifacts upload operations" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_upload_init_path", + "output": "hdfs upload init path config.toml: Starting HDFS path for the artifacts upload operations" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_upload_init_path", + "output": "hdfs upload init path config.toml: Starting HDFS path for the artifacts upload operations" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs upload init path", + "output": "hdfs upload init path config.toml: Starting HDFS path for the artifacts upload operations" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hdfs upload init path config.toml: Starting HDFS path for the artifacts upload operations" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hdfs_upload_init_path", + "output": "hdfs upload init path config.toml: Starting HDFS path for the artifacts upload operations" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hdfs_upload_init_path", + "output": "hdfs upload init path config.toml: Starting HDFS path for the artifacts upload operations" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_mapr_multi_user_mode do? : enable mapr multi user mode config.toml: Enables the multi-user mode for MapR integration, which allows to have MapR ticket per user." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_mapr_multi_user_mode. 
: enable mapr multi user mode config.toml: Enables the multi-user mode for MapR integration, which allows to have MapR ticket per user." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_mapr_multi_user_mode", + "output": "enable mapr multi user mode config.toml: Enables the multi-user mode for MapR integration, which allows to have MapR ticket per user." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_mapr_multi_user_mode", + "output": "enable mapr multi user mode config.toml: Enables the multi-user mode for MapR integration, which allows to have MapR ticket per user." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable mapr multi user mode", + "output": "enable mapr multi user mode config.toml: Enables the multi-user mode for MapR integration, which allows to have MapR ticket per user." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable mapr multi user mode config.toml: Enables the multi-user mode for MapR integration, which allows to have MapR ticket per user." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_mapr_multi_user_mode", + "output": "enable mapr multi user mode config.toml: Enables the multi-user mode for MapR integration, which allows to have MapR ticket per user." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_mapr_multi_user_mode", + "output": "enable mapr multi user mode config.toml: Enables the multi-user mode for MapR integration, which allows to have MapR ticket per user." + }, + { + "prompt_type": "plain", + "instruction": ": What does dtap_auth_type do? 
: dtap auth type config.toml: Blue Data DTap connector settings are similar to HDFS connector settings. Specify DTap Auth Type, allowed options are: noauth : No authentication needed principal : Authenticate with DTab with a principal user keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. keytabimpersonation : Login with impersonation using a keytab NOTE: \"hdfs_app_classpath\" and \"core_site_xml_path\" are both required to be set for DTap connector" + }, + { + "prompt_type": "plain", + "instruction": ": Explain dtap_auth_type. : dtap auth type config.toml: Blue Data DTap connector settings are similar to HDFS connector settings. Specify DTap Auth Type, allowed options are: noauth : No authentication needed principal : Authenticate with DTab with a principal user keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. keytabimpersonation : Login with impersonation using a keytab NOTE: \"hdfs_app_classpath\" and \"core_site_xml_path\" are both required to be set for DTap connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_auth_type", + "output": "dtap auth type config.toml: Blue Data DTap connector settings are similar to HDFS connector settings. Specify DTap Auth Type, allowed options are: noauth : No authentication needed principal : Authenticate with DTab with a principal user keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. 
keytabimpersonation : Login with impersonation using a keytab NOTE: \"hdfs_app_classpath\" and \"core_site_xml_path\" are both required to be set for DTap connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_auth_type", + "output": "dtap auth type config.toml: Blue Data DTap connector settings are similar to HDFS connector settings. Specify DTap Auth Type, allowed options are: noauth : No authentication needed principal : Authenticate with DTab with a principal user keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. keytabimpersonation : Login with impersonation using a keytab NOTE: \"hdfs_app_classpath\" and \"core_site_xml_path\" are both required to be set for DTap connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap auth type", + "output": "dtap auth type config.toml: Blue Data DTap connector settings are similar to HDFS connector settings. Specify DTap Auth Type, allowed options are: noauth : No authentication needed principal : Authenticate with DTab with a principal user keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. keytabimpersonation : Login with impersonation using a keytab NOTE: \"hdfs_app_classpath\" and \"core_site_xml_path\" are both required to be set for DTap connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "dtap auth type config.toml: Blue Data DTap connector settings are similar to HDFS connector settings. Specify DTap Auth Type, allowed options are: noauth : No authentication needed principal : Authenticate with DTab with a principal user keytab : Authenticate with a Key tab (recommended). 
If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. keytabimpersonation : Login with impersonation using a keytab NOTE: \"hdfs_app_classpath\" and \"core_site_xml_path\" are both required to be set for DTap connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dtap_auth_type", + "output": "dtap auth type config.toml: Blue Data DTap connector settings are similar to HDFS connector settings. Specify DTap Auth Type, allowed options are: noauth : No authentication needed principal : Authenticate with DTab with a principal user keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. keytabimpersonation : Login with impersonation using a keytab NOTE: \"hdfs_app_classpath\" and \"core_site_xml_path\" are both required to be set for DTap connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dtap_auth_type", + "output": "dtap auth type config.toml: Blue Data DTap connector settings are similar to HDFS connector settings. Specify DTap Auth Type, allowed options are: noauth : No authentication needed principal : Authenticate with DTab with a principal user keytab : Authenticate with a Key tab (recommended). If running DAI as a service, then the Kerberos keytab needs to be owned by the DAI user. keytabimpersonation : Login with impersonation using a keytab NOTE: \"hdfs_app_classpath\" and \"core_site_xml_path\" are both required to be set for DTap connector" + }, + { + "prompt_type": "plain", + "instruction": ": What does dtap_config_path do? : dtap config path config.toml: Dtap (HDFS) config folder path , can contain multiple config files" + }, + { + "prompt_type": "plain", + "instruction": ": Explain dtap_config_path. 
: dtap config path config.toml: Dtap (HDFS) config folder path , can contain multiple config files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_config_path", + "output": "dtap config path config.toml: Dtap (HDFS) config folder path , can contain multiple config files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_config_path", + "output": "dtap config path config.toml: Dtap (HDFS) config folder path , can contain multiple config files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap config path", + "output": "dtap config path config.toml: Dtap (HDFS) config folder path , can contain multiple config files" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "dtap config path config.toml: Dtap (HDFS) config folder path , can contain multiple config files" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dtap_config_path", + "output": "dtap config path config.toml: Dtap (HDFS) config folder path , can contain multiple config files" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dtap_config_path", + "output": "dtap config path config.toml: Dtap (HDFS) config folder path , can contain multiple config files" + }, + { + "prompt_type": "plain", + "instruction": ": What does dtap_key_tab_path do? : dtap key tab path config.toml: Path of the principal key tab file, dtap_key_tab_path is deprecated. Please use dtap_keytab_path" + }, + { + "prompt_type": "plain", + "instruction": ": Explain dtap_key_tab_path. : dtap key tab path config.toml: Path of the principal key tab file, dtap_key_tab_path is deprecated. 
Please use dtap_keytab_path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_key_tab_path", + "output": "dtap key tab path config.toml: Path of the principal key tab file, dtap_key_tab_path is deprecated. Please use dtap_keytab_path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_key_tab_path", + "output": "dtap key tab path config.toml: Path of the principal key tab file, dtap_key_tab_path is deprecated. Please use dtap_keytab_path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap key tab path", + "output": "dtap key tab path config.toml: Path of the principal key tab file, dtap_key_tab_path is deprecated. Please use dtap_keytab_path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "dtap key tab path config.toml: Path of the principal key tab file, dtap_key_tab_path is deprecated. Please use dtap_keytab_path" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dtap_key_tab_path", + "output": "dtap key tab path config.toml: Path of the principal key tab file, dtap_key_tab_path is deprecated. Please use dtap_keytab_path" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dtap_key_tab_path", + "output": "dtap key tab path config.toml: Path of the principal key tab file, dtap_key_tab_path is deprecated. Please use dtap_keytab_path" + }, + { + "prompt_type": "plain", + "instruction": ": What does dtap_keytab_path do? : dtap keytab path config.toml: Path of the principal key tab file" + }, + { + "prompt_type": "plain", + "instruction": ": Explain dtap_keytab_path. 
: dtap keytab path config.toml: Path of the principal key tab file" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_keytab_path", + "output": "dtap keytab path config.toml: Path of the principal key tab file" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_keytab_path", + "output": "dtap keytab path config.toml: Path of the principal key tab file" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap keytab path", + "output": "dtap keytab path config.toml: Path of the principal key tab file" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "dtap keytab path config.toml: Path of the principal key tab file" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dtap_keytab_path", + "output": "dtap keytab path config.toml: Path of the principal key tab file" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dtap_keytab_path", + "output": "dtap keytab path config.toml: Path of the principal key tab file" + }, + { + "prompt_type": "plain", + "instruction": ": What does dtap_app_principal_user do? : dtap app principal user config.toml: Kerberos app principal user (recommended)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain dtap_app_principal_user. 
: dtap app principal user config.toml: Kerberos app principal user (recommended)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_app_principal_user", + "output": "dtap app principal user config.toml: Kerberos app principal user (recommended)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_app_principal_user", + "output": "dtap app principal user config.toml: Kerberos app principal user (recommended)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap app principal user", + "output": "dtap app principal user config.toml: Kerberos app principal user (recommended)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "dtap app principal user config.toml: Kerberos app principal user (recommended)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dtap_app_principal_user", + "output": "dtap app principal user config.toml: Kerberos app principal user (recommended)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dtap_app_principal_user", + "output": "dtap app principal user config.toml: Kerberos app principal user (recommended)" + }, + { + "prompt_type": "plain", + "instruction": ": What does dtap_app_login_user do? : dtap app login user config.toml: Specify the user id of the current user here as user@realm" + }, + { + "prompt_type": "plain", + "instruction": ": Explain dtap_app_login_user. 
: dtap app login user config.toml: Specify the user id of the current user here as user@realm" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_app_login_user", + "output": "dtap app login user config.toml: Specify the user id of the current user here as user@realm" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_app_login_user", + "output": "dtap app login user config.toml: Specify the user id of the current user here as user@realm" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap app login user", + "output": "dtap app login user config.toml: Specify the user id of the current user here as user@realm" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "dtap app login user config.toml: Specify the user id of the current user here as user@realm" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dtap_app_login_user", + "output": "dtap app login user config.toml: Specify the user id of the current user here as user@realm" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dtap_app_login_user", + "output": "dtap app login user config.toml: Specify the user id of the current user here as user@realm" + }, + { + "prompt_type": "plain", + "instruction": ": What does dtap_app_jvm_args do? : dtap app jvm args config.toml: JVM args for DTap distributions, provide args seperate by space" + }, + { + "prompt_type": "plain", + "instruction": ": Explain dtap_app_jvm_args. 
: dtap app jvm args config.toml: JVM args for DTap distributions, provide args seperate by space" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_app_jvm_args", + "output": "dtap app jvm args config.toml: JVM args for DTap distributions, provide args seperate by space" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_app_jvm_args", + "output": "dtap app jvm args config.toml: JVM args for DTap distributions, provide args seperate by space" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap app jvm args", + "output": "dtap app jvm args config.toml: JVM args for DTap distributions, provide args seperate by space" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "dtap app jvm args config.toml: JVM args for DTap distributions, provide args seperate by space" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dtap_app_jvm_args", + "output": "dtap app jvm args config.toml: JVM args for DTap distributions, provide args seperate by space" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dtap_app_jvm_args", + "output": "dtap app jvm args config.toml: JVM args for DTap distributions, provide args seperate by space" + }, + { + "prompt_type": "plain", + "instruction": ": What does dtap_app_classpath do? : dtap app classpath config.toml: DTap (HDFS) class path. NOTE: set 'hdfs_app_classpath' also" + }, + { + "prompt_type": "plain", + "instruction": ": Explain dtap_app_classpath. : dtap app classpath config.toml: DTap (HDFS) class path. 
NOTE: set 'hdfs_app_classpath' also" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_app_classpath", + "output": "dtap app classpath config.toml: DTap (HDFS) class path. NOTE: set 'hdfs_app_classpath' also" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_app_classpath", + "output": "dtap app classpath config.toml: DTap (HDFS) class path. NOTE: set 'hdfs_app_classpath' also" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap app classpath", + "output": "dtap app classpath config.toml: DTap (HDFS) class path. NOTE: set 'hdfs_app_classpath' also" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "dtap app classpath config.toml: DTap (HDFS) class path. NOTE: set 'hdfs_app_classpath' also" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dtap_app_classpath", + "output": "dtap app classpath config.toml: DTap (HDFS) class path. NOTE: set 'hdfs_app_classpath' also" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dtap_app_classpath", + "output": "dtap app classpath config.toml: DTap (HDFS) class path. NOTE: set 'hdfs_app_classpath' also" + }, + { + "prompt_type": "plain", + "instruction": ": What does dtap_init_path do? : dtap init path config.toml: Starting DTAP path displayed in UI DTAP browser" + }, + { + "prompt_type": "plain", + "instruction": ": Explain dtap_init_path. 
: dtap init path config.toml: Starting DTAP path displayed in UI DTAP browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_init_path", + "output": "dtap init path config.toml: Starting DTAP path displayed in UI DTAP browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap_init_path", + "output": "dtap init path config.toml: Starting DTAP path displayed in UI DTAP browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dtap init path", + "output": "dtap init path config.toml: Starting DTAP path displayed in UI DTAP browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "dtap init path config.toml: Starting DTAP path displayed in UI DTAP browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dtap_init_path", + "output": "dtap init path config.toml: Starting DTAP path displayed in UI DTAP browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dtap_init_path", + "output": "dtap init path config.toml: Starting DTAP path displayed in UI DTAP browser" + }, + { + "prompt_type": "plain", + "instruction": ": What does aws_access_key_id do? : aws access key id config.toml: S3 Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain aws_access_key_id. : aws access key id config.toml: S3 Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: AWS Access Key ID: . 
: Set the aws access key id config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws_access_key_id", + "output": "aws access key id config.toml: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws_access_key_id", + "output": "aws access key id config.toml: AWS Access Key ID: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws access key id", + "output": "aws access key id config.toml: AWS Access Key ID: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "AWS Access Key ID: ", + "output": "aws access key id config.toml: AWS Access Key ID: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting aws_access_key_id", + "output": "aws access key id config.toml: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting aws_access_key_id", + "output": "aws access key id config.toml: AWS Access Key ID: S3 Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does aws_secret_access_key do? : aws secret access key config.toml: S3 Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain aws_secret_access_key. : aws secret access key config.toml: S3 Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: AWS Secret Access Key: . 
: Set the aws secret access key config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws_secret_access_key", + "output": "aws secret access key config.toml: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws_secret_access_key", + "output": "aws secret access key config.toml: AWS Secret Access Key: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws secret access key", + "output": "aws secret access key config.toml: AWS Secret Access Key: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "AWS Secret Access Key: ", + "output": "aws secret access key config.toml: AWS Secret Access Key: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting aws_secret_access_key", + "output": "aws secret access key config.toml: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting aws_secret_access_key", + "output": "aws secret access key config.toml: AWS Secret Access Key: S3 Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does aws_role_arn do? : aws role arn config.toml: S3 Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain aws_role_arn. 
: aws role arn config.toml: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws_role_arn", + "output": "aws role arn config.toml: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws_role_arn", + "output": "aws role arn config.toml: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws role arn", + "output": "aws role arn config.toml: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "aws role arn config.toml: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting aws_role_arn", + "output": "aws role arn config.toml: S3 Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting aws_role_arn", + "output": "aws role arn config.toml: S3 Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does aws_default_region do? : aws default region config.toml: What region to use when none is specified in the s3 url. Ignored when aws_s3_endpoint_url is set. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain aws_default_region. : aws default region config.toml: What region to use when none is specified in the s3 url. Ignored when aws_s3_endpoint_url is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws_default_region", + "output": "aws default region config.toml: What region to use when none is specified in the s3 url. Ignored when aws_s3_endpoint_url is set. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws_default_region", + "output": "aws default region config.toml: What region to use when none is specified in the s3 url. Ignored when aws_s3_endpoint_url is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws default region", + "output": "aws default region config.toml: What region to use when none is specified in the s3 url. Ignored when aws_s3_endpoint_url is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "aws default region config.toml: What region to use when none is specified in the s3 url. Ignored when aws_s3_endpoint_url is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting aws_default_region", + "output": "aws default region config.toml: What region to use when none is specified in the s3 url. Ignored when aws_s3_endpoint_url is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting aws_default_region", + "output": "aws default region config.toml: What region to use when none is specified in the s3 url. Ignored when aws_s3_endpoint_url is set. " + }, + { + "prompt_type": "plain", + "instruction": ": What does aws_s3_endpoint_url do? : aws s3 endpoint url config.toml: Sets endpoint URL that will be used to access S3." + }, + { + "prompt_type": "plain", + "instruction": ": Explain aws_s3_endpoint_url. : aws s3 endpoint url config.toml: Sets endpoint URL that will be used to access S3." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws_s3_endpoint_url", + "output": "aws s3 endpoint url config.toml: Sets endpoint URL that will be used to access S3." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws_s3_endpoint_url", + "output": "aws s3 endpoint url config.toml: Sets endpoint URL that will be used to access S3." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws s3 endpoint url", + "output": "aws s3 endpoint url config.toml: Sets endpoint URL that will be used to access S3." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "aws s3 endpoint url config.toml: Sets endpoint URL that will be used to access S3." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting aws_s3_endpoint_url", + "output": "aws s3 endpoint url config.toml: Sets endpoint URL that will be used to access S3." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting aws_s3_endpoint_url", + "output": "aws s3 endpoint url config.toml: Sets endpoint URL that will be used to access S3." + }, + { + "prompt_type": "plain", + "instruction": ": What does aws_use_ec2_role_credentials do? : aws use ec2 role credentials config.toml: If set to true S3 Connector will try to to obtain credentials associated with the role attached to the EC2 instance." + }, + { + "prompt_type": "plain", + "instruction": ": Explain aws_use_ec2_role_credentials. : aws use ec2 role credentials config.toml: If set to true S3 Connector will try to to obtain credentials associated with the role attached to the EC2 instance." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws_use_ec2_role_credentials", + "output": "aws use ec2 role credentials config.toml: If set to true S3 Connector will try to to obtain credentials associated with the role attached to the EC2 instance." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws_use_ec2_role_credentials", + "output": "aws use ec2 role credentials config.toml: If set to true S3 Connector will try to to obtain credentials associated with the role attached to the EC2 instance." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "aws use ec2 role credentials", + "output": "aws use ec2 role credentials config.toml: If set to true S3 Connector will try to to obtain credentials associated with the role attached to the EC2 instance." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "aws use ec2 role credentials config.toml: If set to true S3 Connector will try to to obtain credentials associated with the role attached to the EC2 instance." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting aws_use_ec2_role_credentials", + "output": "aws use ec2 role credentials config.toml: If set to true S3 Connector will try to to obtain credentials associated with the role attached to the EC2 instance." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting aws_use_ec2_role_credentials", + "output": "aws use ec2 role credentials config.toml: If set to true S3 Connector will try to to obtain credentials associated with the role attached to the EC2 instance." + }, + { + "prompt_type": "plain", + "instruction": ": What does s3_init_path do? 
: s3 init path config.toml: Starting S3 path displayed in UI S3 browser" + }, + { + "prompt_type": "plain", + "instruction": ": Explain s3_init_path. : s3 init path config.toml: Starting S3 path displayed in UI S3 browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "s3_init_path", + "output": "s3 init path config.toml: Starting S3 path displayed in UI S3 browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "s3_init_path", + "output": "s3 init path config.toml: Starting S3 path displayed in UI S3 browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "s3 init path", + "output": "s3 init path config.toml: Starting S3 path displayed in UI S3 browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "s3 init path config.toml: Starting S3 path displayed in UI S3 browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting s3_init_path", + "output": "s3 init path config.toml: Starting S3 path displayed in UI S3 browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting s3_init_path", + "output": "s3 init path config.toml: Starting S3 path displayed in UI S3 browser" + }, + { + "prompt_type": "plain", + "instruction": ": What does s3_skip_cert_verification do? : s3 skip cert verification config.toml: S3 Connector will skip cert verification if this is set to true, (mostly used for S3-like connectors, e.g. Ceph)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain s3_skip_cert_verification. 
: s3 skip cert verification config.toml: S3 Connector will skip cert verification if this is set to true, (mostly used for S3-like connectors, e.g. Ceph)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "s3_skip_cert_verification", + "output": "s3 skip cert verification config.toml: S3 Connector will skip cert verification if this is set to true, (mostly used for S3-like connectors, e.g. Ceph)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "s3_skip_cert_verification", + "output": "s3 skip cert verification config.toml: S3 Connector will skip cert verification if this is set to true, (mostly used for S3-like connectors, e.g. Ceph)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "s3 skip cert verification", + "output": "s3 skip cert verification config.toml: S3 Connector will skip cert verification if this is set to true, (mostly used for S3-like connectors, e.g. Ceph)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "s3 skip cert verification config.toml: S3 Connector will skip cert verification if this is set to true, (mostly used for S3-like connectors, e.g. Ceph)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting s3_skip_cert_verification", + "output": "s3 skip cert verification config.toml: S3 Connector will skip cert verification if this is set to true, (mostly used for S3-like connectors, e.g. 
Ceph)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting s3_skip_cert_verification", + "output": "s3 skip cert verification config.toml: S3 Connector will skip cert verification if this is set to true, (mostly used for S3-like connectors, e.g. Ceph)" + }, + { + "prompt_type": "plain", + "instruction": ": What does s3_connector_cert_location do? : s3 connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the S3 connector" + }, + { + "prompt_type": "plain", + "instruction": ": Explain s3_connector_cert_location. : s3 connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the S3 connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "s3_connector_cert_location", + "output": "s3 connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the S3 connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "s3_connector_cert_location", + "output": "s3 connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the S3 connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "s3 connector cert location", + "output": "s3 connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the S3 connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "s3 connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the S3 connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short 
explanation of the expert setting s3_connector_cert_location", + "output": "s3 connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the S3 connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting s3_connector_cert_location", + "output": "s3 connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the S3 connector" + }, + { + "prompt_type": "plain", + "instruction": ": What does gcs_path_to_service_account_json do? : gcs path to service account json config.toml: GCS Connector credentials example (suggested) -- '/licenses/my_service_account_json.json' " + }, + { + "prompt_type": "plain", + "instruction": ": Explain gcs_path_to_service_account_json. : gcs path to service account json config.toml: GCS Connector credentials example (suggested) -- '/licenses/my_service_account_json.json' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gcs_path_to_service_account_json", + "output": "gcs path to service account json config.toml: GCS Connector credentials example (suggested) -- '/licenses/my_service_account_json.json' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gcs_path_to_service_account_json", + "output": "gcs path to service account json config.toml: GCS Connector credentials example (suggested) -- '/licenses/my_service_account_json.json' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gcs path to service account json", + "output": "gcs path to service account json config.toml: GCS Connector credentials example (suggested) -- '/licenses/my_service_account_json.json' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for 
Driverless AI", + "input": "", + "output": "gcs path to service account json config.toml: GCS Connector credentials example (suggested) -- '/licenses/my_service_account_json.json' " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting gcs_path_to_service_account_json", + "output": "gcs path to service account json config.toml: GCS Connector credentials example (suggested) -- '/licenses/my_service_account_json.json' " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting gcs_path_to_service_account_json", + "output": "gcs path to service account json config.toml: GCS Connector credentials example (suggested) -- '/licenses/my_service_account_json.json' " + }, + { + "prompt_type": "plain", + "instruction": ": What does gcs_init_path do? : gcs init path config.toml: Starting GCS path displayed in UI GCS browser" + }, + { + "prompt_type": "plain", + "instruction": ": Explain gcs_init_path. 
: gcs init path config.toml: Starting GCS path displayed in UI GCS browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gcs_init_path", + "output": "gcs init path config.toml: Starting GCS path displayed in UI GCS browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gcs_init_path", + "output": "gcs init path config.toml: Starting GCS path displayed in UI GCS browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gcs init path", + "output": "gcs init path config.toml: Starting GCS path displayed in UI GCS browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "gcs init path config.toml: Starting GCS path displayed in UI GCS browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting gcs_init_path", + "output": "gcs init path config.toml: Starting GCS path displayed in UI GCS browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting gcs_init_path", + "output": "gcs init path config.toml: Starting GCS path displayed in UI GCS browser" + }, + { + "prompt_type": "plain", + "instruction": ": What does gcs_access_token_scopes do? : gcs access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google Cloud Storage" + }, + { + "prompt_type": "plain", + "instruction": ": Explain gcs_access_token_scopes. 
: gcs access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google Cloud Storage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gcs_access_token_scopes", + "output": "gcs access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google Cloud Storage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gcs_access_token_scopes", + "output": "gcs access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google Cloud Storage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gcs access token scopes", + "output": "gcs access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google Cloud Storage" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "gcs access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google Cloud Storage" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting gcs_access_token_scopes", + "output": "gcs access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google Cloud Storage" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting gcs_access_token_scopes", + "output": "gcs access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google Cloud Storage" + }, + { + "prompt_type": "plain", + "instruction": ": What does 
gcs_default_project_id do? : gcs default project id config.toml: When ``google_cloud_use_oauth`` is enabled, Google Cloud client cannot automatically infer the default project, thus it must be explicitly specified" + }, + { + "prompt_type": "plain", + "instruction": ": Explain gcs_default_project_id. : gcs default project id config.toml: When ``google_cloud_use_oauth`` is enabled, Google Cloud client cannot automatically infer the default project, thus it must be explicitly specified" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gcs_default_project_id", + "output": "gcs default project id config.toml: When ``google_cloud_use_oauth`` is enabled, Google Cloud client cannot automatically infer the default project, thus it must be explicitly specified" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gcs_default_project_id", + "output": "gcs default project id config.toml: When ``google_cloud_use_oauth`` is enabled, Google Cloud client cannot automatically infer the default project, thus it must be explicitly specified" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gcs default project id", + "output": "gcs default project id config.toml: When ``google_cloud_use_oauth`` is enabled, Google Cloud client cannot automatically infer the default project, thus it must be explicitly specified" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "gcs default project id config.toml: When ``google_cloud_use_oauth`` is enabled, Google Cloud client cannot automatically infer the default project, thus it must be explicitly specified" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting gcs_default_project_id", 
+ "output": "gcs default project id config.toml: When ``google_cloud_use_oauth`` is enabled, Google Cloud client cannot automatically infer the default project, thus it must be explicitly specified" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting gcs_default_project_id", + "output": "gcs default project id config.toml: When ``google_cloud_use_oauth`` is enabled, Google Cloud client cannot automatically infer the default project, thus it must be explicitly specified" + }, + { + "prompt_type": "plain", + "instruction": ": What does gbq_access_token_scopes do? : gbq access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google BigQuery" + }, + { + "prompt_type": "plain", + "instruction": ": Explain gbq_access_token_scopes. : gbq access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google BigQuery" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gbq_access_token_scopes", + "output": "gbq access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google BigQuery" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gbq_access_token_scopes", + "output": "gbq access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google BigQuery" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gbq access token scopes", + "output": "gbq access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google BigQuery" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for 
Driverless AI", + "input": "", + "output": "gbq access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google BigQuery" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting gbq_access_token_scopes", + "output": "gbq access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google BigQuery" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting gbq_access_token_scopes", + "output": "gbq access token scopes config.toml: Space-separated list of OAuth2 scopes for the access token used to authenticate in Google BigQuery" + }, + { + "prompt_type": "plain", + "instruction": ": What does google_cloud_use_oauth do? : google cloud use oauth config.toml: By default the DriverlessAI Google Cloud Storage and BigQuery connectors are using service account file to retrieve authentication credentials. When enabled, the Storage and BigQuery connectors will use OAuth2 user access tokens to authenticate in Google Cloud instead." + }, + { + "prompt_type": "plain", + "instruction": ": Explain google_cloud_use_oauth. : google cloud use oauth config.toml: By default the DriverlessAI Google Cloud Storage and BigQuery connectors are using service account file to retrieve authentication credentials. When enabled, the Storage and BigQuery connectors will use OAuth2 user access tokens to authenticate in Google Cloud instead." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "google_cloud_use_oauth", + "output": "google cloud use oauth config.toml: By default the DriverlessAI Google Cloud Storage and BigQuery connectors are using service account file to retrieve authentication credentials. When enabled, the Storage and BigQuery connectors will use OAuth2 user access tokens to authenticate in Google Cloud instead." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "google_cloud_use_oauth", + "output": "google cloud use oauth config.toml: By default the DriverlessAI Google Cloud Storage and BigQuery connectors are using service account file to retrieve authentication credentials. When enabled, the Storage and BigQuery connectors will use OAuth2 user access tokens to authenticate in Google Cloud instead." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "google cloud use oauth", + "output": "google cloud use oauth config.toml: By default the DriverlessAI Google Cloud Storage and BigQuery connectors are using service account file to retrieve authentication credentials. When enabled, the Storage and BigQuery connectors will use OAuth2 user access tokens to authenticate in Google Cloud instead." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "google cloud use oauth config.toml: By default the DriverlessAI Google Cloud Storage and BigQuery connectors are using service account file to retrieve authentication credentials. When enabled, the Storage and BigQuery connectors will use OAuth2 user access tokens to authenticate in Google Cloud instead." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting google_cloud_use_oauth", + "output": "google cloud use oauth config.toml: By default the DriverlessAI Google Cloud Storage and BigQuery connectors are using service account file to retrieve authentication credentials. When enabled, the Storage and BigQuery connectors will use OAuth2 user access tokens to authenticate in Google Cloud instead." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting google_cloud_use_oauth", + "output": "google cloud use oauth config.toml: By default the DriverlessAI Google Cloud Storage and BigQuery connectors are using service account file to retrieve authentication credentials. When enabled, the Storage and BigQuery connectors will use OAuth2 user access tokens to authenticate in Google Cloud instead." + }, + { + "prompt_type": "plain", + "instruction": ": What does minio_endpoint_url do? : minio endpoint url config.toml: Minio Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain minio_endpoint_url. 
: minio endpoint url config.toml: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio_endpoint_url", + "output": "minio endpoint url config.toml: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio_endpoint_url", + "output": "minio endpoint url config.toml: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio endpoint url", + "output": "minio endpoint url config.toml: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "minio endpoint url config.toml: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting minio_endpoint_url", + "output": "minio endpoint url config.toml: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting minio_endpoint_url", + "output": "minio endpoint url config.toml: Minio Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does minio_access_key_id do? : minio access key id config.toml: Minio Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain minio_access_key_id. : minio access key id config.toml: Minio Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Minio Access Key ID: . 
: Set the minio access key id config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio_access_key_id", + "output": "minio access key id config.toml: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio_access_key_id", + "output": "minio access key id config.toml: Minio Access Key ID: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio access key id", + "output": "minio access key id config.toml: Minio Access Key ID: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Minio Access Key ID: ", + "output": "minio access key id config.toml: Minio Access Key ID: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting minio_access_key_id", + "output": "minio access key id config.toml: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting minio_access_key_id", + "output": "minio access key id config.toml: Minio Access Key ID: Minio Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does minio_secret_access_key do? : minio secret access key config.toml: Minio Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain minio_secret_access_key. : minio secret access key config.toml: Minio Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Minio Secret Access Key: . 
: Set the minio secret access key config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio_secret_access_key", + "output": "minio secret access key config.toml: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio_secret_access_key", + "output": "minio secret access key config.toml: Minio Secret Access Key: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio secret access key", + "output": "minio secret access key config.toml: Minio Secret Access Key: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Minio Secret Access Key: ", + "output": "minio secret access key config.toml: Minio Secret Access Key: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting minio_secret_access_key", + "output": "minio secret access key config.toml: Minio Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting minio_secret_access_key", + "output": "minio secret access key config.toml: Minio Secret Access Key: Minio Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does minio_skip_cert_verification do? : minio skip cert verification config.toml: Minio Connector will skip cert verification if this is set to true" + }, + { + "prompt_type": "plain", + "instruction": ": Explain minio_skip_cert_verification. 
: minio skip cert verification config.toml: Minio Connector will skip cert verification if this is set to true" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio_skip_cert_verification", + "output": "minio skip cert verification config.toml: Minio Connector will skip cert verification if this is set to true" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio_skip_cert_verification", + "output": "minio skip cert verification config.toml: Minio Connector will skip cert verification if this is set to true" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio skip cert verification", + "output": "minio skip cert verification config.toml: Minio Connector will skip cert verification if this is set to true" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "minio skip cert verification config.toml: Minio Connector will skip cert verification if this is set to true" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting minio_skip_cert_verification", + "output": "minio skip cert verification config.toml: Minio Connector will skip cert verification if this is set to true" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting minio_skip_cert_verification", + "output": "minio skip cert verification config.toml: Minio Connector will skip cert verification if this is set to true" + }, + { + "prompt_type": "plain", + "instruction": ": What does minio_connector_cert_location do? 
: minio connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the Minio connector" + }, + { + "prompt_type": "plain", + "instruction": ": Explain minio_connector_cert_location. : minio connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the Minio connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio_connector_cert_location", + "output": "minio connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the Minio connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio_connector_cert_location", + "output": "minio connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the Minio connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio connector cert location", + "output": "minio connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the Minio connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "minio connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the Minio connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting minio_connector_cert_location", + "output": "minio connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the Minio connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting minio_connector_cert_location", + "output": 
"minio connector cert location config.toml: path/to/cert/bundle.pem - A filename of the CA cert bundle to use for the Minio connector" + }, + { + "prompt_type": "plain", + "instruction": ": What does minio_init_path do? : minio init path config.toml: Starting Minio path displayed in UI Minio browser" + }, + { + "prompt_type": "plain", + "instruction": ": Explain minio_init_path. : minio init path config.toml: Starting Minio path displayed in UI Minio browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio_init_path", + "output": "minio init path config.toml: Starting Minio path displayed in UI Minio browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio_init_path", + "output": "minio init path config.toml: Starting Minio path displayed in UI Minio browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "minio init path", + "output": "minio init path config.toml: Starting Minio path displayed in UI Minio browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "minio init path config.toml: Starting Minio path displayed in UI Minio browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting minio_init_path", + "output": "minio init path config.toml: Starting Minio path displayed in UI Minio browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting minio_init_path", + "output": "minio init path config.toml: Starting Minio path displayed in UI Minio browser" + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_drive_endpoint_url do? 
: h2o drive endpoint url config.toml: H2O Drive server endpoint URL" + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_drive_endpoint_url. : h2o drive endpoint url config.toml: H2O Drive server endpoint URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_drive_endpoint_url", + "output": "h2o drive endpoint url config.toml: H2O Drive server endpoint URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_drive_endpoint_url", + "output": "h2o drive endpoint url config.toml: H2O Drive server endpoint URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o drive endpoint url", + "output": "h2o drive endpoint url config.toml: H2O Drive server endpoint URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o drive endpoint url config.toml: H2O Drive server endpoint URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_drive_endpoint_url", + "output": "h2o drive endpoint url config.toml: H2O Drive server endpoint URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_drive_endpoint_url", + "output": "h2o drive endpoint url config.toml: H2O Drive server endpoint URL" + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_drive_access_token_scopes do? : h2o drive access token scopes config.toml: Space seperated list of OpenID scopes for the access token used by the H2O Drive connector" + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_drive_access_token_scopes. 
: h2o drive access token scopes config.toml: Space separated list of OpenID scopes for the access token used by the H2O Drive connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_drive_access_token_scopes", + "output": "h2o drive access token scopes config.toml: Space separated list of OpenID scopes for the access token used by the H2O Drive connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_drive_access_token_scopes", + "output": "h2o drive access token scopes config.toml: Space separated list of OpenID scopes for the access token used by the H2O Drive connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o drive access token scopes", + "output": "h2o drive access token scopes config.toml: Space separated list of OpenID scopes for the access token used by the H2O Drive connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o drive access token scopes config.toml: Space separated list of OpenID scopes for the access token used by the H2O Drive connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_drive_access_token_scopes", + "output": "h2o drive access token scopes config.toml: Space separated list of OpenID scopes for the access token used by the H2O Drive connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_drive_access_token_scopes", + "output": "h2o drive access token scopes config.toml: Space separated list of OpenID scopes for the access token used by the H2O Drive connector" + }, + { + "prompt_type": "plain", + "instruction": ": What does 
h2o_drive_session_duration do? : h2o drive session duration config.toml: Maximum duration (in seconds) for a session with the H2O Drive" + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_drive_session_duration. : h2o drive session duration config.toml: Maximum duration (in seconds) for a session with the H2O Drive" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_drive_session_duration", + "output": "h2o drive session duration config.toml: Maximum duration (in seconds) for a session with the H2O Drive" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_drive_session_duration", + "output": "h2o drive session duration config.toml: Maximum duration (in seconds) for a session with the H2O Drive" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o drive session duration", + "output": "h2o drive session duration config.toml: Maximum duration (in seconds) for a session with the H2O Drive" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o drive session duration config.toml: Maximum duration (in seconds) for a session with the H2O Drive" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_drive_session_duration", + "output": "h2o drive session duration config.toml: Maximum duration (in seconds) for a session with the H2O Drive" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_drive_session_duration", + "output": "h2o drive session duration config.toml: Maximum duration (in seconds) for a session with the H2O Drive" + }, + { + "prompt_type": "plain", + "instruction": ": What does snowflake_url do? 
: snowflake url config.toml: Recommended Provide: url, user, password Optionally Provide: account, user, password Example URL: https://..snowflakecomputing.com Snowflake Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain snowflake_url. : snowflake url config.toml: Recommended Provide: url, user, password Optionally Provide: account, user, password Example URL: https://..snowflakecomputing.com Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake_url", + "output": "snowflake url config.toml: Recommended Provide: url, user, password Optionally Provide: account, user, password Example URL: https://..snowflakecomputing.com Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake_url", + "output": "snowflake url config.toml: Recommended Provide: url, user, password Optionally Provide: account, user, password Example URL: https://..snowflakecomputing.com Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake url", + "output": "snowflake url config.toml: Recommended Provide: url, user, password Optionally Provide: account, user, password Example URL: https://..snowflakecomputing.com Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "snowflake url config.toml: Recommended Provide: url, user, password Optionally Provide: account, user, password Example URL: https://..snowflakecomputing.com Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting snowflake_url", + "output": "snowflake url config.toml: 
Recommended Provide: url, user, password Optionally Provide: account, user, password Example URL: https://..snowflakecomputing.com Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting snowflake_url", + "output": "snowflake url config.toml: Recommended Provide: url, user, password Optionally Provide: account, user, password Example URL: https://..snowflakecomputing.com Snowflake Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does snowflake_user do? : snowflake user config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain snowflake_user. : snowflake user config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake_user", + "output": "snowflake user config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake_user", + "output": "snowflake user config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake user", + "output": "snowflake user config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "snowflake user config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting snowflake_user", + "output": "snowflake user config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting snowflake_user", + "output": "snowflake user 
config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does snowflake_password do? : snowflake password config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain snowflake_password. : snowflake password config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake_password", + "output": "snowflake password config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake_password", + "output": "snowflake password config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake password", + "output": "snowflake password config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "snowflake password config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting snowflake_password", + "output": "snowflake password config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting snowflake_password", + "output": "snowflake password config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does snowflake_account do? : snowflake account config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain snowflake_account. 
: snowflake account config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake_account", + "output": "snowflake account config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake_account", + "output": "snowflake account config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake account", + "output": "snowflake account config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "snowflake account config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting snowflake_account", + "output": "snowflake account config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting snowflake_account", + "output": "snowflake account config.toml: Snowflake Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does snowflake_allow_stages do? : snowflake allow stages config.toml: Setting to allow or disallow Snowflake connector from using Snowflake stages during queries. True - will permit the connector to use stages and generally improves performance. However, if the Snowflake user does not have permission to create/use stages will end in errors. False - will prevent the connector from using stages, thus Snowflake users without permission to create/use stages will have successful queries, however may significantly negatively impact query performance. 
" + }, + { + "prompt_type": "plain", + "instruction": ": Explain snowflake_allow_stages. : snowflake allow stages config.toml: Setting to allow or disallow Snowflake connector from using Snowflake stages during queries. True - will permit the connector to use stages and generally improves performance. However, if the Snowflake user does not have permission to create/use stages will end in errors. False - will prevent the connector from using stages, thus Snowflake users without permission to create/use stages will have successful queries, however may significantly negatively impact query performance. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake_allow_stages", + "output": "snowflake allow stages config.toml: Setting to allow or disallow Snowflake connector from using Snowflake stages during queries. True - will permit the connector to use stages and generally improves performance. However, if the Snowflake user does not have permission to create/use stages will end in errors. False - will prevent the connector from using stages, thus Snowflake users without permission to create/use stages will have successful queries, however may significantly negatively impact query performance. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake_allow_stages", + "output": "snowflake allow stages config.toml: Setting to allow or disallow Snowflake connector from using Snowflake stages during queries. True - will permit the connector to use stages and generally improves performance. However, if the Snowflake user does not have permission to create/use stages will end in errors. False - will prevent the connector from using stages, thus Snowflake users without permission to create/use stages will have successful queries, however may significantly negatively impact query performance. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake allow stages", + "output": "snowflake allow stages config.toml: Setting to allow or disallow Snowflake connector from using Snowflake stages during queries. True - will permit the connector to use stages and generally improves performance. However, if the Snowflake user does not have permission to create/use stages will end in errors. False - will prevent the connector from using stages, thus Snowflake users without permission to create/use stages will have successful queries, however may significantly negatively impact query performance. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "snowflake allow stages config.toml: Setting to allow or disallow Snowflake connector from using Snowflake stages during queries. True - will permit the connector to use stages and generally improves performance. However, if the Snowflake user does not have permission to create/use stages will end in errors. False - will prevent the connector from using stages, thus Snowflake users without permission to create/use stages will have successful queries, however may significantly negatively impact query performance. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting snowflake_allow_stages", + "output": "snowflake allow stages config.toml: Setting to allow or disallow Snowflake connector from using Snowflake stages during queries. True - will permit the connector to use stages and generally improves performance. However, if the Snowflake user does not have permission to create/use stages will end in errors. 
False - will prevent the connector from using stages, thus Snowflake users without permission to create/use stages will have successful queries, however may significantly negatively impact query performance. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting snowflake_allow_stages", + "output": "snowflake allow stages config.toml: Setting to allow or disallow Snowflake connector from using Snowflake stages during queries. True - will permit the connector to use stages and generally improves performance. However, if the Snowflake user does not have permission to create/use stages will end in errors. False - will prevent the connector from using stages, thus Snowflake users without permission to create/use stages will have successful queries, however may significantly negatively impact query performance. " + }, + { + "prompt_type": "plain", + "instruction": ": What does snowflake_batch_size do? : snowflake batch size config.toml: Sets the number of rows to be fetched by Snowflake cursor at one time. This is only used if setting `snowflake_allow_stages` is set to False, may help with performance depending on the type and size of data being queried. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain snowflake_batch_size. : snowflake batch size config.toml: Sets the number of rows to be fetched by Snowflake cursor at one time. This is only used if setting `snowflake_allow_stages` is set to False, may help with performance depending on the type and size of data being queried. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake_batch_size", + "output": "snowflake batch size config.toml: Sets the number of rows to be fetched by Snowflake cursor at one time. This is only used if setting `snowflake_allow_stages` is set to False, may help with performance depending on the type and size of data being queried. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake_batch_size", + "output": "snowflake batch size config.toml: Sets the number of rows to be fetched by Snowflake cursor at one time. This is only used if setting `snowflake_allow_stages` is set to False, may help with performance depending on the type and size of data being queried. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "snowflake batch size", + "output": "snowflake batch size config.toml: Sets the number of rows to be fetched by Snowflake cursor at one time. This is only used if setting `snowflake_allow_stages` is set to False, may help with performance depending on the type and size of data being queried. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "snowflake batch size config.toml: Sets the number of rows to be fetched by Snowflake cursor at one time. This is only used if setting `snowflake_allow_stages` is set to False, may help with performance depending on the type and size of data being queried. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting snowflake_batch_size", + "output": "snowflake batch size config.toml: Sets the number of rows to be fetched by Snowflake cursor at one time. This is only used if setting `snowflake_allow_stages` is set to False, may help with performance depending on the type and size of data being queried. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting snowflake_batch_size", + "output": "snowflake batch size config.toml: Sets the number of rows to be fetched by Snowflake cursor at one time. 
This is only used if setting `snowflake_allow_stages` is set to False, may help with performance depending on the type and size of data being queried. " + }, + { + "prompt_type": "plain", + "instruction": ": What does kdb_user do? : kdb user config.toml: KDB Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain kdb_user. : kdb user config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb_user", + "output": "kdb user config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb_user", + "output": "kdb user config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb user", + "output": "kdb user config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "kdb user config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting kdb_user", + "output": "kdb user config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting kdb_user", + "output": "kdb user config.toml: KDB Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does kdb_password do? : kdb password config.toml: KDB Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain kdb_password. 
: kdb password config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb_password", + "output": "kdb password config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb_password", + "output": "kdb password config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb password", + "output": "kdb password config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "kdb password config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting kdb_password", + "output": "kdb password config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting kdb_password", + "output": "kdb password config.toml: KDB Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does kdb_hostname do? : kdb hostname config.toml: KDB Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain kdb_hostname. 
: kdb hostname config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb_hostname", + "output": "kdb hostname config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb_hostname", + "output": "kdb hostname config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb hostname", + "output": "kdb hostname config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "kdb hostname config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting kdb_hostname", + "output": "kdb hostname config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting kdb_hostname", + "output": "kdb hostname config.toml: KDB Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does kdb_port do? : kdb port config.toml: KDB Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain kdb_port. 
: kdb port config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb_port", + "output": "kdb port config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb_port", + "output": "kdb port config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb port", + "output": "kdb port config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "kdb port config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting kdb_port", + "output": "kdb port config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting kdb_port", + "output": "kdb port config.toml: KDB Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does kdb_app_classpath do? : kdb app classpath config.toml: KDB Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain kdb_app_classpath. 
: kdb app classpath config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb_app_classpath", + "output": "kdb app classpath config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb_app_classpath", + "output": "kdb app classpath config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb app classpath", + "output": "kdb app classpath config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "kdb app classpath config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting kdb_app_classpath", + "output": "kdb app classpath config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting kdb_app_classpath", + "output": "kdb app classpath config.toml: KDB Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does kdb_app_jvm_args do? : kdb app jvm args config.toml: KDB Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": Explain kdb_app_jvm_args. 
: kdb app jvm args config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb_app_jvm_args", + "output": "kdb app jvm args config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb_app_jvm_args", + "output": "kdb app jvm args config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "kdb app jvm args", + "output": "kdb app jvm args config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "kdb app jvm args config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting kdb_app_jvm_args", + "output": "kdb app jvm args config.toml: KDB Connector credentials" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting kdb_app_jvm_args", + "output": "kdb app jvm args config.toml: KDB Connector credentials" + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_blob_account_name do? : azure blob account name config.toml: Account name for Azure Blob Store Connector" + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_blob_account_name. : azure blob account name config.toml: Account name for Azure Blob Store Connector" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Azure Blob Store Account Name: . 
: Set the azure blob account name config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_account_name", + "output": "azure blob account name config.toml: Account name for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_account_name", + "output": "azure blob account name config.toml: Azure Blob Store Account Name: Account name for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure blob account name", + "output": "azure blob account name config.toml: Azure Blob Store Account Name: Account name for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Azure Blob Store Account Name: ", + "output": "azure blob account name config.toml: Azure Blob Store Account Name: Account name for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_blob_account_name", + "output": "azure blob account name config.toml: Account name for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_blob_account_name", + "output": "azure blob account name config.toml: Azure Blob Store Account Name: Account name for Azure Blob Store Connector" + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_blob_account_key do? : azure blob account key config.toml: Account key for Azure Blob Store Connector" + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_blob_account_key. 
: azure blob account key config.toml: Account key for Azure Blob Store Connector" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Azure Blob Store Account Key: . : Set the azure blob account key config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_account_key", + "output": "azure blob account key config.toml: Account key for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_account_key", + "output": "azure blob account key config.toml: Azure Blob Store Account Key: Account key for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure blob account key", + "output": "azure blob account key config.toml: Azure Blob Store Account Key: Account key for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Azure Blob Store Account Key: ", + "output": "azure blob account key config.toml: Azure Blob Store Account Key: Account key for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_blob_account_key", + "output": "azure blob account key config.toml: Account key for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_blob_account_key", + "output": "azure blob account key config.toml: Azure Blob Store Account Key: Account key for Azure Blob Store Connector" + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_connection_string do? 
: azure connection string config.toml: Connection string for Azure Blob Store Connector" + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_connection_string. : azure connection string config.toml: Connection string for Azure Blob Store Connector" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Azure Blob Store Connection String: . : Set the azure connection string config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_connection_string", + "output": "azure connection string config.toml: Connection string for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_connection_string", + "output": "azure connection string config.toml: Azure Blob Store Connection String: Connection string for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure connection string", + "output": "azure connection string config.toml: Azure Blob Store Connection String: Connection string for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Azure Blob Store Connection String: ", + "output": "azure connection string config.toml: Azure Blob Store Connection String: Connection string for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_connection_string", + "output": "azure connection string config.toml: Connection string for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_connection_string", + "output": "azure connection string config.toml: Azure Blob Store 
Connection String: Connection string for Azure Blob Store Connector" + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_sas_token do? : azure sas token config.toml: SAS token for Azure Blob Store Connector" + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_sas_token. : azure sas token config.toml: SAS token for Azure Blob Store Connector" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Azure Blob Store SAS token: . : Set the azure sas token config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_sas_token", + "output": "azure sas token config.toml: SAS token for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_sas_token", + "output": "azure sas token config.toml: Azure Blob Store SAS token: SAS token for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure sas token", + "output": "azure sas token config.toml: Azure Blob Store SAS token: SAS token for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Azure Blob Store SAS token: ", + "output": "azure sas token config.toml: Azure Blob Store SAS token: SAS token for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_sas_token", + "output": "azure sas token config.toml: SAS token for Azure Blob Store Connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_sas_token", + "output": "azure sas token config.toml: Azure Blob Store SAS token: SAS token for Azure Blob Store Connector" + }, + { + 
"prompt_type": "plain", + "instruction": ": What does azure_blob_init_path do? : azure blob init path config.toml: Starting Azure blob store path displayed in UI Azure blob store browser" + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_blob_init_path. : azure blob init path config.toml: Starting Azure blob store path displayed in UI Azure blob store browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_init_path", + "output": "azure blob init path config.toml: Starting Azure blob store path displayed in UI Azure blob store browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_init_path", + "output": "azure blob init path config.toml: Starting Azure blob store path displayed in UI Azure blob store browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure blob init path", + "output": "azure blob init path config.toml: Starting Azure blob store path displayed in UI Azure blob store browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "azure blob init path config.toml: Starting Azure blob store path displayed in UI Azure blob store browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_blob_init_path", + "output": "azure blob init path config.toml: Starting Azure blob store path displayed in UI Azure blob store browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_blob_init_path", + "output": "azure blob init path config.toml: Starting Azure blob store path displayed in UI Azure blob store browser" + }, + { + "prompt_type": "plain", + "instruction": ": 
What does azure_blob_use_access_token do? : azure blob use access token config.toml: When enabled, Azure Blob Store Connector will use access token derived from the credentials received on login with OpenID Connect." + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_blob_use_access_token. : azure blob use access token config.toml: When enabled, Azure Blob Store Connector will use access token derived from the credentials received on login with OpenID Connect." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_use_access_token", + "output": "azure blob use access token config.toml: When enabled, Azure Blob Store Connector will use access token derived from the credentials received on login with OpenID Connect." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_use_access_token", + "output": "azure blob use access token config.toml: When enabled, Azure Blob Store Connector will use access token derived from the credentials received on login with OpenID Connect." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure blob use access token", + "output": "azure blob use access token config.toml: When enabled, Azure Blob Store Connector will use access token derived from the credentials received on login with OpenID Connect." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "azure blob use access token config.toml: When enabled, Azure Blob Store Connector will use access token derived from the credentials received on login with OpenID Connect." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_blob_use_access_token", + "output": "azure blob use access token config.toml: When enabled, Azure Blob Store Connector will use access token derived from the credentials received on login with OpenID Connect." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_blob_use_access_token", + "output": "azure blob use access token config.toml: When enabled, Azure Blob Store Connector will use access token derived from the credentials received on login with OpenID Connect." + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_blob_use_access_token_scopes do? : azure blob use access token scopes config.toml: Configures the scopes for the access token used by Azure Blob Store Connector when the azure_blob_use_access_token us enabled. (space separated list)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_blob_use_access_token_scopes. : azure blob use access token scopes config.toml: Configures the scopes for the access token used by Azure Blob Store Connector when the azure_blob_use_access_token us enabled. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_use_access_token_scopes", + "output": "azure blob use access token scopes config.toml: Configures the scopes for the access token used by Azure Blob Store Connector when the azure_blob_use_access_token us enabled. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_use_access_token_scopes", + "output": "azure blob use access token scopes config.toml: Configures the scopes for the access token used by Azure Blob Store Connector when the azure_blob_use_access_token us enabled. 
(space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure blob use access token scopes", + "output": "azure blob use access token scopes config.toml: Configures the scopes for the access token used by Azure Blob Store Connector when the azure_blob_use_access_token us enabled. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "azure blob use access token scopes config.toml: Configures the scopes for the access token used by Azure Blob Store Connector when the azure_blob_use_access_token us enabled. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_blob_use_access_token_scopes", + "output": "azure blob use access token scopes config.toml: Configures the scopes for the access token used by Azure Blob Store Connector when the azure_blob_use_access_token us enabled. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_blob_use_access_token_scopes", + "output": "azure blob use access token scopes config.toml: Configures the scopes for the access token used by Azure Blob Store Connector when the azure_blob_use_access_token us enabled. (space separated list)" + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_blob_use_access_token_source do? : azure blob use access token source config.toml: Sets the source of the access token for accessing the Azure bob store KEYCLOAK: Will exchange the session access token for the federated refresh token with Keycloak and use it to obtain the access token directly with the Azure AD. SESSION: Will use the access token derived from the credentials received on login with OpenID Connect. 
" + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_blob_use_access_token_source. : azure blob use access token source config.toml: Sets the source of the access token for accessing the Azure bob store KEYCLOAK: Will exchange the session access token for the federated refresh token with Keycloak and use it to obtain the access token directly with the Azure AD. SESSION: Will use the access token derived from the credentials received on login with OpenID Connect. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_use_access_token_source", + "output": "azure blob use access token source config.toml: Sets the source of the access token for accessing the Azure bob store KEYCLOAK: Will exchange the session access token for the federated refresh token with Keycloak and use it to obtain the access token directly with the Azure AD. SESSION: Will use the access token derived from the credentials received on login with OpenID Connect. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_use_access_token_source", + "output": "azure blob use access token source config.toml: Sets the source of the access token for accessing the Azure bob store KEYCLOAK: Will exchange the session access token for the federated refresh token with Keycloak and use it to obtain the access token directly with the Azure AD. SESSION: Will use the access token derived from the credentials received on login with OpenID Connect. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure blob use access token source", + "output": "azure blob use access token source config.toml: Sets the source of the access token for accessing the Azure bob store KEYCLOAK: Will exchange the session access token for the federated refresh token with Keycloak and use it to obtain the access token directly with the Azure AD. SESSION: Will use the access token derived from the credentials received on login with OpenID Connect. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "azure blob use access token source config.toml: Sets the source of the access token for accessing the Azure bob store KEYCLOAK: Will exchange the session access token for the federated refresh token with Keycloak and use it to obtain the access token directly with the Azure AD. SESSION: Will use the access token derived from the credentials received on login with OpenID Connect. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_blob_use_access_token_source", + "output": "azure blob use access token source config.toml: Sets the source of the access token for accessing the Azure bob store KEYCLOAK: Will exchange the session access token for the federated refresh token with Keycloak and use it to obtain the access token directly with the Azure AD. SESSION: Will use the access token derived from the credentials received on login with OpenID Connect. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_blob_use_access_token_source", + "output": "azure blob use access token source config.toml: Sets the source of the access token for accessing the Azure bob store KEYCLOAK: Will exchange the session access token for the federated refresh token with Keycloak and use it to obtain the access token directly with the Azure AD. SESSION: Will use the access token derived from the credentials received on login with OpenID Connect. " + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_blob_keycloak_aad_client_id do? : azure blob keycloak aad client id config.toml: Application (client) ID registered on Azure AD when the KEYCLOAK source is enabled." + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_blob_keycloak_aad_client_id. : azure blob keycloak aad client id config.toml: Application (client) ID registered on Azure AD when the KEYCLOAK source is enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_keycloak_aad_client_id", + "output": "azure blob keycloak aad client id config.toml: Application (client) ID registered on Azure AD when the KEYCLOAK source is enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_keycloak_aad_client_id", + "output": "azure blob keycloak aad client id config.toml: Application (client) ID registered on Azure AD when the KEYCLOAK source is enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure blob keycloak aad client id", + "output": "azure blob keycloak aad client id config.toml: Application (client) ID registered on Azure AD when the KEYCLOAK source is enabled." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "azure blob keycloak aad client id config.toml: Application (client) ID registered on Azure AD when the KEYCLOAK source is enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_blob_keycloak_aad_client_id", + "output": "azure blob keycloak aad client id config.toml: Application (client) ID registered on Azure AD when the KEYCLOAK source is enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_blob_keycloak_aad_client_id", + "output": "azure blob keycloak aad client id config.toml: Application (client) ID registered on Azure AD when the KEYCLOAK source is enabled." + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_blob_keycloak_aad_client_secret do? : azure blob keycloak aad client secret config.toml: Application (client) secret when the KEYCLOAK source is enabled." + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_blob_keycloak_aad_client_secret. : azure blob keycloak aad client secret config.toml: Application (client) secret when the KEYCLOAK source is enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_keycloak_aad_client_secret", + "output": "azure blob keycloak aad client secret config.toml: Application (client) secret when the KEYCLOAK source is enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_keycloak_aad_client_secret", + "output": "azure blob keycloak aad client secret config.toml: Application (client) secret when the KEYCLOAK source is enabled." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure blob keycloak aad client secret", + "output": "azure blob keycloak aad client secret config.toml: Application (client) secret when the KEYCLOAK source is enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "azure blob keycloak aad client secret config.toml: Application (client) secret when the KEYCLOAK source is enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_blob_keycloak_aad_client_secret", + "output": "azure blob keycloak aad client secret config.toml: Application (client) secret when the KEYCLOAK source is enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_blob_keycloak_aad_client_secret", + "output": "azure blob keycloak aad client secret config.toml: Application (client) secret when the KEYCLOAK source is enabled." + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_blob_keycloak_aad_auth_uri do? : azure blob keycloak aad auth uri config.toml: A URL that identifies a token authority. It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_blob_keycloak_aad_auth_uri. : azure blob keycloak aad auth uri config.toml: A URL that identifies a token authority. It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_keycloak_aad_auth_uri", + "output": "azure blob keycloak aad auth uri config.toml: A URL that identifies a token authority. 
It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_keycloak_aad_auth_uri", + "output": "azure blob keycloak aad auth uri config.toml: A URL that identifies a token authority. It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure blob keycloak aad auth uri", + "output": "azure blob keycloak aad auth uri config.toml: A URL that identifies a token authority. It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "azure blob keycloak aad auth uri config.toml: A URL that identifies a token authority. It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_blob_keycloak_aad_auth_uri", + "output": "azure blob keycloak aad auth uri config.toml: A URL that identifies a token authority. It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_blob_keycloak_aad_auth_uri", + "output": "azure blob keycloak aad auth uri config.toml: A URL that identifies a token authority. It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_blob_keycloak_broker_token_endpoint do? 
: azure blob keycloak broker token endpoint config.toml: Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_blob_keycloak_broker_token_endpoint. : azure blob keycloak broker token endpoint config.toml: Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_keycloak_broker_token_endpoint", + "output": "azure blob keycloak broker token endpoint config.toml: Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_blob_keycloak_broker_token_endpoint", + "output": "azure blob keycloak broker token endpoint config.toml: Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure blob keycloak broker token endpoint", + "output": "azure blob keycloak broker token endpoint config.toml: Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "azure blob keycloak broker token endpoint config.toml: Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "human_bot", + 
"instruction": "Provide a short explanation of the expert setting azure_blob_keycloak_broker_token_endpoint", + "output": "azure blob keycloak broker token endpoint config.toml: Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_blob_keycloak_broker_token_endpoint", + "output": "azure blob keycloak broker token endpoint config.toml: Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_enable_token_auth_aad do? : azure enable token auth aad config.toml: (DEPRECATED, use azure_blob_use_access_token and azure_blob_use_access_token_source=\"KEYCLOAK\" instead.) (When enabled only DEPRECATED options azure_ad_client_id, azure_ad_client_secret, azure_ad_auth_uri and azure_keycloak_idp_token_endpoint will be effective) This is equivalent to setting azure_blob_use_access_token_source = \"KEYCLOAK\" and setting azure_blob_keycloak_aad_client_id, azure_blob_keycloak_aad_client_secret, azure_blob_keycloak_aad_auth_uri and azure_blob_keycloak_broker_token_endpoint options. ) If true, enable the Azure Blob Storage Connector to use Azure AD tokens obtained from the Keycloak for auth. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_enable_token_auth_aad. : azure enable token auth aad config.toml: (DEPRECATED, use azure_blob_use_access_token and azure_blob_use_access_token_source=\"KEYCLOAK\" instead.) 
(When enabled only DEPRECATED options azure_ad_client_id, azure_ad_client_secret, azure_ad_auth_uri and azure_keycloak_idp_token_endpoint will be effective) This is equivalent to setting azure_blob_use_access_token_source = \"KEYCLOAK\" and setting azure_blob_keycloak_aad_client_id, azure_blob_keycloak_aad_client_secret, azure_blob_keycloak_aad_auth_uri and azure_blob_keycloak_broker_token_endpoint options. ) If true, enable the Azure Blob Storage Connector to use Azure AD tokens obtained from the Keycloak for auth. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_enable_token_auth_aad", + "output": "azure enable token auth aad config.toml: (DEPRECATED, use azure_blob_use_access_token and azure_blob_use_access_token_source=\"KEYCLOAK\" instead.) (When enabled only DEPRECATED options azure_ad_client_id, azure_ad_client_secret, azure_ad_auth_uri and azure_keycloak_idp_token_endpoint will be effective) This is equivalent to setting azure_blob_use_access_token_source = \"KEYCLOAK\" and setting azure_blob_keycloak_aad_client_id, azure_blob_keycloak_aad_client_secret, azure_blob_keycloak_aad_auth_uri and azure_blob_keycloak_broker_token_endpoint options. ) If true, enable the Azure Blob Storage Connector to use Azure AD tokens obtained from the Keycloak for auth. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_enable_token_auth_aad", + "output": "azure enable token auth aad config.toml: (DEPRECATED, use azure_blob_use_access_token and azure_blob_use_access_token_source=\"KEYCLOAK\" instead.) 
(When enabled only DEPRECATED options azure_ad_client_id, azure_ad_client_secret, azure_ad_auth_uri and azure_keycloak_idp_token_endpoint will be effective) This is equivalent to setting azure_blob_use_access_token_source = \"KEYCLOAK\" and setting azure_blob_keycloak_aad_client_id, azure_blob_keycloak_aad_client_secret, azure_blob_keycloak_aad_auth_uri and azure_blob_keycloak_broker_token_endpoint options. ) If true, enable the Azure Blob Storage Connector to use Azure AD tokens obtained from the Keycloak for auth. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure enable token auth aad", + "output": "azure enable token auth aad config.toml: (DEPRECATED, use azure_blob_use_access_token and azure_blob_use_access_token_source=\"KEYCLOAK\" instead.) (When enabled only DEPRECATED options azure_ad_client_id, azure_ad_client_secret, azure_ad_auth_uri and azure_keycloak_idp_token_endpoint will be effective) This is equivalent to setting azure_blob_use_access_token_source = \"KEYCLOAK\" and setting azure_blob_keycloak_aad_client_id, azure_blob_keycloak_aad_client_secret, azure_blob_keycloak_aad_auth_uri and azure_blob_keycloak_broker_token_endpoint options. ) If true, enable the Azure Blob Storage Connector to use Azure AD tokens obtained from the Keycloak for auth. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "azure enable token auth aad config.toml: (DEPRECATED, use azure_blob_use_access_token and azure_blob_use_access_token_source=\"KEYCLOAK\" instead.) 
(When enabled only DEPRECATED options azure_ad_client_id, azure_ad_client_secret, azure_ad_auth_uri and azure_keycloak_idp_token_endpoint will be effective) This is equivalent to setting azure_blob_use_access_token_source = \"KEYCLOAK\" and setting azure_blob_keycloak_aad_client_id, azure_blob_keycloak_aad_client_secret, azure_blob_keycloak_aad_auth_uri and azure_blob_keycloak_broker_token_endpoint options. ) If true, enable the Azure Blob Storage Connector to use Azure AD tokens obtained from the Keycloak for auth. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_enable_token_auth_aad", + "output": "azure enable token auth aad config.toml: (DEPRECATED, use azure_blob_use_access_token and azure_blob_use_access_token_source=\"KEYCLOAK\" instead.) (When enabled only DEPRECATED options azure_ad_client_id, azure_ad_client_secret, azure_ad_auth_uri and azure_keycloak_idp_token_endpoint will be effective) This is equivalent to setting azure_blob_use_access_token_source = \"KEYCLOAK\" and setting azure_blob_keycloak_aad_client_id, azure_blob_keycloak_aad_client_secret, azure_blob_keycloak_aad_auth_uri and azure_blob_keycloak_broker_token_endpoint options. ) If true, enable the Azure Blob Storage Connector to use Azure AD tokens obtained from the Keycloak for auth. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_enable_token_auth_aad", + "output": "azure enable token auth aad config.toml: (DEPRECATED, use azure_blob_use_access_token and azure_blob_use_access_token_source=\"KEYCLOAK\" instead.) 
(When enabled only DEPRECATED options azure_ad_client_id, azure_ad_client_secret, azure_ad_auth_uri and azure_keycloak_idp_token_endpoint will be effective) This is equivalent to setting azure_blob_use_access_token_source = \"KEYCLOAK\" and setting azure_blob_keycloak_aad_client_id, azure_blob_keycloak_aad_client_secret, azure_blob_keycloak_aad_auth_uri and azure_blob_keycloak_broker_token_endpoint options. ) If true, enable the Azure Blob Storage Connector to use Azure AD tokens obtained from the Keycloak for auth. " + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_ad_client_id do? : azure ad client id config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_id instead.) Application (client) ID registered on Azure AD" + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_ad_client_id. : azure ad client id config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_id instead.) Application (client) ID registered on Azure AD" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_ad_client_id", + "output": "azure ad client id config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_id instead.) Application (client) ID registered on Azure AD" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_ad_client_id", + "output": "azure ad client id config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_id instead.) Application (client) ID registered on Azure AD" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure ad client id", + "output": "azure ad client id config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_id instead.) 
Application (client) ID registered on Azure AD" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "azure ad client id config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_id instead.) Application (client) ID registered on Azure AD" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_ad_client_id", + "output": "azure ad client id config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_id instead.) Application (client) ID registered on Azure AD" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_ad_client_id", + "output": "azure ad client id config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_id instead.) Application (client) ID registered on Azure AD" + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_ad_client_secret do? : azure ad client secret config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_secret instead.) Application Client Secret" + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_ad_client_secret. : azure ad client secret config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_secret instead.) Application Client Secret" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_ad_client_secret", + "output": "azure ad client secret config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_secret instead.) Application Client Secret" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_ad_client_secret", + "output": "azure ad client secret config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_secret instead.) 
Application Client Secret" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure ad client secret", + "output": "azure ad client secret config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_secret instead.) Application Client Secret" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "azure ad client secret config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_secret instead.) Application Client Secret" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_ad_client_secret", + "output": "azure ad client secret config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_secret instead.) Application Client Secret" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_ad_client_secret", + "output": "azure ad client secret config.toml: (DEPRECATED, use azure_blob_keycloak_aad_client_secret instead.) Application Client Secret" + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_ad_auth_uri do? : azure ad auth uri config.toml: (DEPRECATED, use azure_blob_keycloak_aad_auth_uri instead)A URL that identifies a token authority. It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_ad_auth_uri. : azure ad auth uri config.toml: (DEPRECATED, use azure_blob_keycloak_aad_auth_uri instead)A URL that identifies a token authority. 
It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_ad_auth_uri", + "output": "azure ad auth uri config.toml: (DEPRECATED, use azure_blob_keycloak_aad_auth_uri instead)A URL that identifies a token authority. It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_ad_auth_uri", + "output": "azure ad auth uri config.toml: (DEPRECATED, use azure_blob_keycloak_aad_auth_uri instead)A URL that identifies a token authority. It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure ad auth uri", + "output": "azure ad auth uri config.toml: (DEPRECATED, use azure_blob_keycloak_aad_auth_uri instead)A URL that identifies a token authority. It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "azure ad auth uri config.toml: (DEPRECATED, use azure_blob_keycloak_aad_auth_uri instead)A URL that identifies a token authority. It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_ad_auth_uri", + "output": "azure ad auth uri config.toml: (DEPRECATED, use azure_blob_keycloak_aad_auth_uri instead)A URL that identifies a token authority. 
It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_ad_auth_uri", + "output": "azure ad auth uri config.toml: (DEPRECATED, use azure_blob_keycloak_aad_auth_uri instead)A URL that identifies a token authority. It should be of the format https://login.microsoftonline.com/your_tenant " + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_ad_scopes do? : azure ad scopes config.toml: (DEPRECATED, use azure_blob_use_access_token_scopes instead.)Scopes requested to access a protected API (a resource)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_ad_scopes. : azure ad scopes config.toml: (DEPRECATED, use azure_blob_use_access_token_scopes instead.)Scopes requested to access a protected API (a resource)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_ad_scopes", + "output": "azure ad scopes config.toml: (DEPRECATED, use azure_blob_use_access_token_scopes instead.)Scopes requested to access a protected API (a resource)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_ad_scopes", + "output": "azure ad scopes config.toml: (DEPRECATED, use azure_blob_use_access_token_scopes instead.)Scopes requested to access a protected API (a resource)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure ad scopes", + "output": "azure ad scopes config.toml: (DEPRECATED, use azure_blob_use_access_token_scopes instead.)Scopes requested to access a protected API (a resource)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "azure ad scopes config.toml: (DEPRECATED, use azure_blob_use_access_token_scopes instead.)Scopes requested to access a protected API (a resource)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting azure_ad_scopes", + "output": "azure ad scopes config.toml: (DEPRECATED, use azure_blob_use_access_token_scopes instead.)Scopes requested to access a protected API (a resource)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_ad_scopes", + "output": "azure ad scopes config.toml: (DEPRECATED, use azure_blob_use_access_token_scopes instead.)Scopes requested to access a protected API (a resource)." + }, + { + "prompt_type": "plain", + "instruction": ": What does azure_keycloak_idp_token_endpoint do? : azure keycloak idp token endpoint config.toml: (DEPRECATED, use azure_blob_keycloak_broker_token_endpoint instead.)Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain azure_keycloak_idp_token_endpoint. 
: azure keycloak idp token endpoint config.toml: (DEPRECATED, use azure_blob_keycloak_broker_token_endpoint instead.)Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_keycloak_idp_token_endpoint", + "output": "azure keycloak idp token endpoint config.toml: (DEPRECATED, use azure_blob_keycloak_broker_token_endpoint instead.)Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure_keycloak_idp_token_endpoint", + "output": "azure keycloak idp token endpoint config.toml: (DEPRECATED, use azure_blob_keycloak_broker_token_endpoint instead.)Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "azure keycloak idp token endpoint", + "output": "azure keycloak idp token endpoint config.toml: (DEPRECATED, use azure_blob_keycloak_broker_token_endpoint instead.)Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "azure keycloak idp token endpoint config.toml: (DEPRECATED, use azure_blob_keycloak_broker_token_endpoint instead.)Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide 
a short explanation of the expert setting azure_keycloak_idp_token_endpoint", + "output": "azure keycloak idp token endpoint config.toml: (DEPRECATED, use azure_blob_keycloak_broker_token_endpoint instead.)Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting azure_keycloak_idp_token_endpoint", + "output": "azure keycloak idp token endpoint config.toml: (DEPRECATED, use azure_blob_keycloak_broker_token_endpoint instead.)Keycloak Endpoint for Retrieving External IDP Tokens (https://www.keycloak.org/docs/latest/server_admin/#retrieving-external-idp-tokens)" + }, + { + "prompt_type": "plain", + "instruction": ": What does jdbc_app_configs do? : jdbc app configs config.toml: Configuration for JDBC Connector. JSON/Dictionary String with multiple keys. Format as a single line without using carriage returns (the following example is formatted for readability). Use triple quotations to ensure that the text is read as a single string. Example: '{ \"postgres\": { \"url\": \"jdbc:postgresql://ip address:port/postgres\", \"jarpath\": \"/path/to/postgres_driver.jar\", \"classpath\": \"org.postgresql.Driver\" }, \"mysql\": { \"url\":\"mysql connection string\", \"jarpath\": \"/path/to/mysql_driver.jar\", \"classpath\": \"my.sql.classpath.Driver\" } }' " + }, + { + "prompt_type": "plain", + "instruction": ": Explain jdbc_app_configs. : jdbc app configs config.toml: Configuration for JDBC Connector. JSON/Dictionary String with multiple keys. Format as a single line without using carriage returns (the following example is formatted for readability). Use triple quotations to ensure that the text is read as a single string. 
Example: '{ \"postgres\": { \"url\": \"jdbc:postgresql://ip address:port/postgres\", \"jarpath\": \"/path/to/postgres_driver.jar\", \"classpath\": \"org.postgresql.Driver\" }, \"mysql\": { \"url\":\"mysql connection string\", \"jarpath\": \"/path/to/mysql_driver.jar\", \"classpath\": \"my.sql.classpath.Driver\" } }' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "jdbc_app_configs", + "output": "jdbc app configs config.toml: Configuration for JDBC Connector. JSON/Dictionary String with multiple keys. Format as a single line without using carriage returns (the following example is formatted for readability). Use triple quotations to ensure that the text is read as a single string. Example: '{ \"postgres\": { \"url\": \"jdbc:postgresql://ip address:port/postgres\", \"jarpath\": \"/path/to/postgres_driver.jar\", \"classpath\": \"org.postgresql.Driver\" }, \"mysql\": { \"url\":\"mysql connection string\", \"jarpath\": \"/path/to/mysql_driver.jar\", \"classpath\": \"my.sql.classpath.Driver\" } }' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "jdbc_app_configs", + "output": "jdbc app configs config.toml: Configuration for JDBC Connector. JSON/Dictionary String with multiple keys. Format as a single line without using carriage returns (the following example is formatted for readability). Use triple quotations to ensure that the text is read as a single string. 
Example: '{ \"postgres\": { \"url\": \"jdbc:postgresql://ip address:port/postgres\", \"jarpath\": \"/path/to/postgres_driver.jar\", \"classpath\": \"org.postgresql.Driver\" }, \"mysql\": { \"url\":\"mysql connection string\", \"jarpath\": \"/path/to/mysql_driver.jar\", \"classpath\": \"my.sql.classpath.Driver\" } }' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "jdbc app configs", + "output": "jdbc app configs config.toml: Configuration for JDBC Connector. JSON/Dictionary String with multiple keys. Format as a single line without using carriage returns (the following example is formatted for readability). Use triple quotations to ensure that the text is read as a single string. Example: '{ \"postgres\": { \"url\": \"jdbc:postgresql://ip address:port/postgres\", \"jarpath\": \"/path/to/postgres_driver.jar\", \"classpath\": \"org.postgresql.Driver\" }, \"mysql\": { \"url\":\"mysql connection string\", \"jarpath\": \"/path/to/mysql_driver.jar\", \"classpath\": \"my.sql.classpath.Driver\" } }' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "jdbc app configs config.toml: Configuration for JDBC Connector. JSON/Dictionary String with multiple keys. Format as a single line without using carriage returns (the following example is formatted for readability). Use triple quotations to ensure that the text is read as a single string. 
Example: '{ \"postgres\": { \"url\": \"jdbc:postgresql://ip address:port/postgres\", \"jarpath\": \"/path/to/postgres_driver.jar\", \"classpath\": \"org.postgresql.Driver\" }, \"mysql\": { \"url\":\"mysql connection string\", \"jarpath\": \"/path/to/mysql_driver.jar\", \"classpath\": \"my.sql.classpath.Driver\" } }' " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting jdbc_app_configs", + "output": "jdbc app configs config.toml: Configuration for JDBC Connector. JSON/Dictionary String with multiple keys. Format as a single line without using carriage returns (the following example is formatted for readability). Use triple quotations to ensure that the text is read as a single string. Example: '{ \"postgres\": { \"url\": \"jdbc:postgresql://ip address:port/postgres\", \"jarpath\": \"/path/to/postgres_driver.jar\", \"classpath\": \"org.postgresql.Driver\" }, \"mysql\": { \"url\":\"mysql connection string\", \"jarpath\": \"/path/to/mysql_driver.jar\", \"classpath\": \"my.sql.classpath.Driver\" } }' " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting jdbc_app_configs", + "output": "jdbc app configs config.toml: Configuration for JDBC Connector. JSON/Dictionary String with multiple keys. Format as a single line without using carriage returns (the following example is formatted for readability). Use triple quotations to ensure that the text is read as a single string. Example: '{ \"postgres\": { \"url\": \"jdbc:postgresql://ip address:port/postgres\", \"jarpath\": \"/path/to/postgres_driver.jar\", \"classpath\": \"org.postgresql.Driver\" }, \"mysql\": { \"url\":\"mysql connection string\", \"jarpath\": \"/path/to/mysql_driver.jar\", \"classpath\": \"my.sql.classpath.Driver\" } }' " + }, + { + "prompt_type": "plain", + "instruction": ": What does jdbc_app_jvm_args do? 
: jdbc app jvm args config.toml: extra jvm args for jdbc connector" + }, + { + "prompt_type": "plain", + "instruction": ": Explain jdbc_app_jvm_args. : jdbc app jvm args config.toml: extra jvm args for jdbc connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "jdbc_app_jvm_args", + "output": "jdbc app jvm args config.toml: extra jvm args for jdbc connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "jdbc_app_jvm_args", + "output": "jdbc app jvm args config.toml: extra jvm args for jdbc connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "jdbc app jvm args", + "output": "jdbc app jvm args config.toml: extra jvm args for jdbc connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "jdbc app jvm args config.toml: extra jvm args for jdbc connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting jdbc_app_jvm_args", + "output": "jdbc app jvm args config.toml: extra jvm args for jdbc connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting jdbc_app_jvm_args", + "output": "jdbc app jvm args config.toml: extra jvm args for jdbc connector" + }, + { + "prompt_type": "plain", + "instruction": ": What does jdbc_app_classpath do? : jdbc app classpath config.toml: alternative classpath for jdbc connector" + }, + { + "prompt_type": "plain", + "instruction": ": Explain jdbc_app_classpath. 
: jdbc app classpath config.toml: alternative classpath for jdbc connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "jdbc_app_classpath", + "output": "jdbc app classpath config.toml: alternative classpath for jdbc connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "jdbc_app_classpath", + "output": "jdbc app classpath config.toml: alternative classpath for jdbc connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "jdbc app classpath", + "output": "jdbc app classpath config.toml: alternative classpath for jdbc connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "jdbc app classpath config.toml: alternative classpath for jdbc connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting jdbc_app_classpath", + "output": "jdbc app classpath config.toml: alternative classpath for jdbc connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting jdbc_app_classpath", + "output": "jdbc app classpath config.toml: alternative classpath for jdbc connector" + }, + { + "prompt_type": "plain", + "instruction": ": What does hive_app_configs do? : hive app configs config.toml: Configuration for Hive Connector. Note that inputs are similar to configuring HDFS connectivity. important keys: * hive_conf_path - path to hive configuration, may have multiple files. 
typically: hive-site.xml, hdfs-site.xml, etc * auth_type - one of `noauth`, `keytab`, `keytabimpersonation` for kerberos authentication * keytab_path - path to the kerberos keytab to use for authentication, can be \"\" if using `noauth` auth_type * principal_user - Kerberos app principal user. Required when using auth_type `keytab` or `keytabimpersonation` JSON/Dictionary String with multiple keys. Example: '{ \"hive_connection_1\": { \"hive_conf_path\": \"/path/to/hive/conf\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"hive/localhost@EXAMPLE.COM\", }, \"hive_connection_2\": { \"hive_conf_path\": \"/path/to/hive/conf_2\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"my_user/localhost@EXAMPLE.COM\", } }' " + }, + { + "prompt_type": "plain", + "instruction": ": Explain hive_app_configs. : hive app configs config.toml: Configuration for Hive Connector. Note that inputs are similar to configuring HDFS connectivity. important keys: * hive_conf_path - path to hive configuration, may have multiple files. typically: hive-site.xml, hdfs-site.xml, etc * auth_type - one of `noauth`, `keytab`, `keytabimpersonation` for kerberos authentication * keytab_path - path to the kerberos keytab to use for authentication, can be \"\" if using `noauth` auth_type * principal_user - Kerberos app principal user. Required when using auth_type `keytab` or `keytabimpersonation` JSON/Dictionary String with multiple keys. 
Example: '{ \"hive_connection_1\": { \"hive_conf_path\": \"/path/to/hive/conf\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"hive/localhost@EXAMPLE.COM\", }, \"hive_connection_2\": { \"hive_conf_path\": \"/path/to/hive/conf_2\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"my_user/localhost@EXAMPLE.COM\", } }' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hive_app_configs", + "output": "hive app configs config.toml: Configuration for Hive Connector. Note that inputs are similar to configuring HDFS connectivity. important keys: * hive_conf_path - path to hive configuration, may have multiple files. typically: hive-site.xml, hdfs-site.xml, etc * auth_type - one of `noauth`, `keytab`, `keytabimpersonation` for kerberos authentication * keytab_path - path to the kerberos keytab to use for authentication, can be \"\" if using `noauth` auth_type * principal_user - Kerberos app principal user. Required when using auth_type `keytab` or `keytabimpersonation` JSON/Dictionary String with multiple keys. Example: '{ \"hive_connection_1\": { \"hive_conf_path\": \"/path/to/hive/conf\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"hive/localhost@EXAMPLE.COM\", }, \"hive_connection_2\": { \"hive_conf_path\": \"/path/to/hive/conf_2\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"my_user/localhost@EXAMPLE.COM\", } }' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hive_app_configs", + "output": "hive app configs config.toml: Configuration for Hive Connector. 
Note that inputs are similar to configuring HDFS connectivity. important keys: * hive_conf_path - path to hive configuration, may have multiple files. typically: hive-site.xml, hdfs-site.xml, etc * auth_type - one of `noauth`, `keytab`, `keytabimpersonation` for kerberos authentication * keytab_path - path to the kerberos keytab to use for authentication, can be \"\" if using `noauth` auth_type * principal_user - Kerberos app principal user. Required when using auth_type `keytab` or `keytabimpersonation` JSON/Dictionary String with multiple keys. Example: '{ \"hive_connection_1\": { \"hive_conf_path\": \"/path/to/hive/conf\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"hive/localhost@EXAMPLE.COM\", }, \"hive_connection_2\": { \"hive_conf_path\": \"/path/to/hive/conf_2\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"my_user/localhost@EXAMPLE.COM\", } }' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hive app configs", + "output": "hive app configs config.toml: Configuration for Hive Connector. Note that inputs are similar to configuring HDFS connectivity. important keys: * hive_conf_path - path to hive configuration, may have multiple files. typically: hive-site.xml, hdfs-site.xml, etc * auth_type - one of `noauth`, `keytab`, `keytabimpersonation` for kerberos authentication * keytab_path - path to the kerberos keytab to use for authentication, can be \"\" if using `noauth` auth_type * principal_user - Kerberos app principal user. Required when using auth_type `keytab` or `keytabimpersonation` JSON/Dictionary String with multiple keys. 
Example: '{ \"hive_connection_1\": { \"hive_conf_path\": \"/path/to/hive/conf\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"hive/localhost@EXAMPLE.COM\", }, \"hive_connection_2\": { \"hive_conf_path\": \"/path/to/hive/conf_2\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"my_user/localhost@EXAMPLE.COM\", } }' " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hive app configs config.toml: Configuration for Hive Connector. Note that inputs are similar to configuring HDFS connectivity. important keys: * hive_conf_path - path to hive configuration, may have multiple files. typically: hive-site.xml, hdfs-site.xml, etc * auth_type - one of `noauth`, `keytab`, `keytabimpersonation` for kerberos authentication * keytab_path - path to the kerberos keytab to use for authentication, can be \"\" if using `noauth` auth_type * principal_user - Kerberos app principal user. Required when using auth_type `keytab` or `keytabimpersonation` JSON/Dictionary String with multiple keys. Example: '{ \"hive_connection_1\": { \"hive_conf_path\": \"/path/to/hive/conf\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"hive/localhost@EXAMPLE.COM\", }, \"hive_connection_2\": { \"hive_conf_path\": \"/path/to/hive/conf_2\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"my_user/localhost@EXAMPLE.COM\", } }' " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hive_app_configs", + "output": "hive app configs config.toml: Configuration for Hive Connector. 
Note that inputs are similar to configuring HDFS connectivity. important keys: * hive_conf_path - path to hive configuration, may have multiple files. typically: hive-site.xml, hdfs-site.xml, etc * auth_type - one of `noauth`, `keytab`, `keytabimpersonation` for kerberos authentication * keytab_path - path to the kerberos keytab to use for authentication, can be \"\" if using `noauth` auth_type * principal_user - Kerberos app principal user. Required when using auth_type `keytab` or `keytabimpersonation` JSON/Dictionary String with multiple keys. Example: '{ \"hive_connection_1\": { \"hive_conf_path\": \"/path/to/hive/conf\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"hive/localhost@EXAMPLE.COM\", }, \"hive_connection_2\": { \"hive_conf_path\": \"/path/to/hive/conf_2\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"my_user/localhost@EXAMPLE.COM\", } }' " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hive_app_configs", + "output": "hive app configs config.toml: Configuration for Hive Connector. Note that inputs are similar to configuring HDFS connectivity. important keys: * hive_conf_path - path to hive configuration, may have multiple files. typically: hive-site.xml, hdfs-site.xml, etc * auth_type - one of `noauth`, `keytab`, `keytabimpersonation` for kerberos authentication * keytab_path - path to the kerberos keytab to use for authentication, can be \"\" if using `noauth` auth_type * principal_user - Kerberos app principal user. Required when using auth_type `keytab` or `keytabimpersonation` JSON/Dictionary String with multiple keys. 
Example: '{ \"hive_connection_1\": { \"hive_conf_path\": \"/path/to/hive/conf\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"hive/localhost@EXAMPLE.COM\", }, \"hive_connection_2\": { \"hive_conf_path\": \"/path/to/hive/conf_2\", \"auth_type\": \"one of ['noauth', 'keytab', 'keytabimpersonation']\", \"keytab_path\": \"/path/to/.keytab\", \"principal_user\": \"my_user/localhost@EXAMPLE.COM\", } }' " + }, + { + "prompt_type": "plain", + "instruction": ": What does hive_app_jvm_args do? : hive app jvm args config.toml: Extra jvm args for hive connector" + }, + { + "prompt_type": "plain", + "instruction": ": Explain hive_app_jvm_args. : hive app jvm args config.toml: Extra jvm args for hive connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hive_app_jvm_args", + "output": "hive app jvm args config.toml: Extra jvm args for hive connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hive_app_jvm_args", + "output": "hive app jvm args config.toml: Extra jvm args for hive connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hive app jvm args", + "output": "hive app jvm args config.toml: Extra jvm args for hive connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hive app jvm args config.toml: Extra jvm args for hive connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hive_app_jvm_args", + "output": "hive app jvm args config.toml: Extra jvm args for hive connector" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting 
hive_app_jvm_args", + "output": "hive app jvm args config.toml: Extra jvm args for hive connector" + }, + { + "prompt_type": "plain", + "instruction": ": What does hive_app_classpath do? : hive app classpath config.toml: Alternative classpath for hive connector. Can be used to add additional jar files to classpath." + }, + { + "prompt_type": "plain", + "instruction": ": Explain hive_app_classpath. : hive app classpath config.toml: Alternative classpath for hive connector. Can be used to add additional jar files to classpath." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hive_app_classpath", + "output": "hive app classpath config.toml: Alternative classpath for hive connector. Can be used to add additional jar files to classpath." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hive_app_classpath", + "output": "hive app classpath config.toml: Alternative classpath for hive connector. Can be used to add additional jar files to classpath." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hive app classpath", + "output": "hive app classpath config.toml: Alternative classpath for hive connector. Can be used to add additional jar files to classpath." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hive app classpath config.toml: Alternative classpath for hive connector. Can be used to add additional jar files to classpath." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hive_app_classpath", + "output": "hive app classpath config.toml: Alternative classpath for hive connector. Can be used to add additional jar files to classpath." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hive_app_classpath", + "output": "hive app classpath config.toml: Alternative classpath for hive connector. Can be used to add additional jar files to classpath." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_artifacts_upload do? : enable artifacts upload config.toml: Replace all the downloads on the experiment page to exports and allow users to push to the artifact store configured with artifacts_store" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_artifacts_upload. : enable artifacts upload config.toml: Replace all the downloads on the experiment page to exports and allow users to push to the artifact store configured with artifacts_store" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_artifacts_upload", + "output": "enable artifacts upload config.toml: Replace all the downloads on the experiment page to exports and allow users to push to the artifact store configured with artifacts_store" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_artifacts_upload", + "output": "enable artifacts upload config.toml: Replace all the downloads on the experiment page to exports and allow users to push to the artifact store configured with artifacts_store" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable artifacts upload", + "output": "enable artifacts upload config.toml: Replace all the downloads on the experiment page to exports and allow users to push to the artifact store configured with artifacts_store" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable artifacts upload 
config.toml: Replace all the downloads on the experiment page to exports and allow users to push to the artifact store configured with artifacts_store" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_artifacts_upload", + "output": "enable artifacts upload config.toml: Replace all the downloads on the experiment page to exports and allow users to push to the artifact store configured with artifacts_store" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_artifacts_upload", + "output": "enable artifacts upload config.toml: Replace all the downloads on the experiment page to exports and allow users to push to the artifact store configured with artifacts_store" + }, + { + "prompt_type": "plain", + "instruction": ": What does artifacts_store do? : artifacts store config.toml: Artifacts store. file_system: stores artifacts on a file system directory denoted by artifacts_file_system_directory. s3: stores artifacts to S3 bucket. bitbucket: stores data into Bitbucket repository. azure: stores data into Azure Blob Store. hdfs: stores data into a Hadoop distributed file system location. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain artifacts_store. : artifacts store config.toml: Artifacts store. file_system: stores artifacts on a file system directory denoted by artifacts_file_system_directory. s3: stores artifacts to S3 bucket. bitbucket: stores data into Bitbucket repository. azure: stores data into Azure Blob Store. hdfs: stores data into a Hadoop distributed file system location. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_store", + "output": "artifacts store config.toml: Artifacts store. file_system: stores artifacts on a file system directory denoted by artifacts_file_system_directory. s3: stores artifacts to S3 bucket. 
bitbucket: stores data into Bitbucket repository. azure: stores data into Azure Blob Store. hdfs: stores data into a Hadoop distributed file system location. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_store", + "output": "artifacts store config.toml: Artifacts store. file_system: stores artifacts on a file system directory denoted by artifacts_file_system_directory. s3: stores artifacts to S3 bucket. bitbucket: stores data into Bitbucket repository. azure: stores data into Azure Blob Store. hdfs: stores data into a Hadoop distributed file system location. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts store", + "output": "artifacts store config.toml: Artifacts store. file_system: stores artifacts on a file system directory denoted by artifacts_file_system_directory. s3: stores artifacts to S3 bucket. bitbucket: stores data into Bitbucket repository. azure: stores data into Azure Blob Store. hdfs: stores data into a Hadoop distributed file system location. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "artifacts store config.toml: Artifacts store. file_system: stores artifacts on a file system directory denoted by artifacts_file_system_directory. s3: stores artifacts to S3 bucket. bitbucket: stores data into Bitbucket repository. azure: stores data into Azure Blob Store. hdfs: stores data into a Hadoop distributed file system location. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting artifacts_store", + "output": "artifacts store config.toml: Artifacts store. file_system: stores artifacts on a file system directory denoted by artifacts_file_system_directory. s3: stores artifacts to S3 bucket. 
bitbucket: stores data into Bitbucket repository. azure: stores data into Azure Blob Store. hdfs: stores data into a Hadoop distributed file system location. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting artifacts_store", + "output": "artifacts store config.toml: Artifacts store. file_system: stores artifacts on a file system directory denoted by artifacts_file_system_directory. s3: stores artifacts to S3 bucket. bitbucket: stores data into Bitbucket repository. azure: stores data into Azure Blob Store. hdfs: stores data into a Hadoop distributed file system location. " + }, + { + "prompt_type": "plain", + "instruction": ": What does bitbucket_skip_cert_verification do? : bitbucket skip cert verification config.toml: Decide whether to skip cert verification for Bitbucket when using a repo with HTTPS" + }, + { + "prompt_type": "plain", + "instruction": ": Explain bitbucket_skip_cert_verification. : bitbucket skip cert verification config.toml: Decide whether to skip cert verification for Bitbucket when using a repo with HTTPS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bitbucket_skip_cert_verification", + "output": "bitbucket skip cert verification config.toml: Decide whether to skip cert verification for Bitbucket when using a repo with HTTPS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bitbucket_skip_cert_verification", + "output": "bitbucket skip cert verification config.toml: Decide whether to skip cert verification for Bitbucket when using a repo with HTTPS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bitbucket skip cert verification", + "output": "bitbucket skip cert verification config.toml: Decide whether to skip cert verification for Bitbucket when 
using a repo with HTTPS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "bitbucket skip cert verification config.toml: Decide whether to skip cert verification for Bitbucket when using a repo with HTTPS" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting bitbucket_skip_cert_verification", + "output": "bitbucket skip cert verification config.toml: Decide whether to skip cert verification for Bitbucket when using a repo with HTTPS" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting bitbucket_skip_cert_verification", + "output": "bitbucket skip cert verification config.toml: Decide whether to skip cert verification for Bitbucket when using a repo with HTTPS" + }, + { + "prompt_type": "plain", + "instruction": ": What does bitbucket_tmp_relative_dir do? : bitbucket tmp relative dir config.toml: Local temporary directory to clone artifacts to, relative to data_directory" + }, + { + "prompt_type": "plain", + "instruction": ": Explain bitbucket_tmp_relative_dir. 
: bitbucket tmp relative dir config.toml: Local temporary directory to clone artifacts to, relative to data_directory" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bitbucket_tmp_relative_dir", + "output": "bitbucket tmp relative dir config.toml: Local temporary directory to clone artifacts to, relative to data_directory" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bitbucket_tmp_relative_dir", + "output": "bitbucket tmp relative dir config.toml: Local temporary directory to clone artifacts to, relative to data_directory" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "bitbucket tmp relative dir", + "output": "bitbucket tmp relative dir config.toml: Local temporary directory to clone artifacts to, relative to data_directory" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "bitbucket tmp relative dir config.toml: Local temporary directory to clone artifacts to, relative to data_directory" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting bitbucket_tmp_relative_dir", + "output": "bitbucket tmp relative dir config.toml: Local temporary directory to clone artifacts to, relative to data_directory" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting bitbucket_tmp_relative_dir", + "output": "bitbucket tmp relative dir config.toml: Local temporary directory to clone artifacts to, relative to data_directory" + }, + { + "prompt_type": "plain", + "instruction": ": What does artifacts_file_system_directory do? 
: artifacts file system directory config.toml: File system location where artifacts will be copied in case artifacts_store is set to file_system" + }, + { + "prompt_type": "plain", + "instruction": ": Explain artifacts_file_system_directory. : artifacts file system directory config.toml: File system location where artifacts will be copied in case artifacts_store is set to file_system" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_file_system_directory", + "output": "artifacts file system directory config.toml: File system location where artifacts will be copied in case artifacts_store is set to file_system" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_file_system_directory", + "output": "artifacts file system directory config.toml: File system location where artifacts will be copied in case artifacts_store is set to file_system" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts file system directory", + "output": "artifacts file system directory config.toml: File system location where artifacts will be copied in case artifacts_store is set to file_system" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "artifacts file system directory config.toml: File system location where artifacts will be copied in case artifacts_store is set to file_system" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting artifacts_file_system_directory", + "output": "artifacts file system directory config.toml: File system location where artifacts will be copied in case artifacts_store is set to file_system" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed 
explanation of the expert setting artifacts_file_system_directory", + "output": "artifacts file system directory config.toml: File system location where artifacts will be copied in case artifacts_store is set to file_system" + }, + { + "prompt_type": "plain", + "instruction": ": What does artifacts_s3_bucket do? : artifacts s3 bucket config.toml: AWS S3 bucket used for experiment artifact export." + }, + { + "prompt_type": "plain", + "instruction": ": Explain artifacts_s3_bucket. : artifacts s3 bucket config.toml: AWS S3 bucket used for experiment artifact export." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: AWS S3 Bucket Name: . : Set the artifacts s3 bucket config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_s3_bucket", + "output": "artifacts s3 bucket config.toml: AWS S3 bucket used for experiment artifact export." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_s3_bucket", + "output": "artifacts s3 bucket config.toml: AWS S3 Bucket Name: AWS S3 bucket used for experiment artifact export." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts s3 bucket", + "output": "artifacts s3 bucket config.toml: AWS S3 Bucket Name: AWS S3 bucket used for experiment artifact export." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "AWS S3 Bucket Name: ", + "output": "artifacts s3 bucket config.toml: AWS S3 Bucket Name: AWS S3 bucket used for experiment artifact export." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting artifacts_s3_bucket", + "output": "artifacts s3 bucket config.toml: AWS S3 bucket used for experiment artifact export." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting artifacts_s3_bucket", + "output": "artifacts s3 bucket config.toml: AWS S3 Bucket Name: AWS S3 bucket used for experiment artifact export." + }, + { + "prompt_type": "plain", + "instruction": ": What does artifacts_azure_blob_account_name do? : artifacts azure blob account name config.toml: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "plain", + "instruction": ": Explain artifacts_azure_blob_account_name. : artifacts azure blob account name config.toml: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Azure Blob Store Account Name: . : Set the artifacts azure blob account name config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_azure_blob_account_name", + "output": "artifacts azure blob account name config.toml: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_azure_blob_account_name", + "output": "artifacts azure blob account name config.toml: Azure Blob Store Account Name: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts azure blob account name", + "output": "artifacts azure blob account name config.toml: Azure Blob Store Account Name: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Azure Blob Store Account Name: ", + "output": "artifacts azure blob account name config.toml: Azure 
Blob Store Account Name: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting artifacts_azure_blob_account_name", + "output": "artifacts azure blob account name config.toml: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting artifacts_azure_blob_account_name", + "output": "artifacts azure blob account name config.toml: Azure Blob Store Account Name: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "plain", + "instruction": ": What does artifacts_azure_blob_account_key do? : artifacts azure blob account key config.toml: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "plain", + "instruction": ": Explain artifacts_azure_blob_account_key. : artifacts azure blob account key config.toml: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Azure Blob Store Account Key: . 
: Set the artifacts azure blob account key config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_azure_blob_account_key", + "output": "artifacts azure blob account key config.toml: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_azure_blob_account_key", + "output": "artifacts azure blob account key config.toml: Azure Blob Store Account Key: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts azure blob account key", + "output": "artifacts azure blob account key config.toml: Azure Blob Store Account Key: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Azure Blob Store Account Key: ", + "output": "artifacts azure blob account key config.toml: Azure Blob Store Account Key: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting artifacts_azure_blob_account_key", + "output": "artifacts azure blob account key config.toml: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting artifacts_azure_blob_account_key", + "output": "artifacts azure blob account key config.toml: Azure Blob Store Account Key: Azure Blob Store credentials used for experiment artifact export" + }, + { + "prompt_type": "plain", + "instruction": ": What does artifacts_azure_connection_string do? 
: artifacts azure connection string config.toml: Azure Blob Store connection string used for experiment artifact export" + }, + { + "prompt_type": "plain", + "instruction": ": Explain artifacts_azure_connection_string. : artifacts azure connection string config.toml: Azure Blob Store connection string used for experiment artifact export" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Azure Blob Store Connection String: . : Set the artifacts azure connection string config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_azure_connection_string", + "output": "artifacts azure connection string config.toml: Azure Blob Store connection string used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_azure_connection_string", + "output": "artifacts azure connection string config.toml: Azure Blob Store Connection String: Azure Blob Store connection string used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts azure connection string", + "output": "artifacts azure connection string config.toml: Azure Blob Store Connection String: Azure Blob Store connection string used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Azure Blob Store Connection String: ", + "output": "artifacts azure connection string config.toml: Azure Blob Store Connection String: Azure Blob Store connection string used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting artifacts_azure_connection_string", + "output": "artifacts azure connection string config.toml: 
Azure Blob Store connection string used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting artifacts_azure_connection_string", + "output": "artifacts azure connection string config.toml: Azure Blob Store Connection String: Azure Blob Store connection string used for experiment artifact export" + }, + { + "prompt_type": "plain", + "instruction": ": What does artifacts_azure_sas_token do? : artifacts azure sas token config.toml: Azure Blob Store SAS token used for experiment artifact export" + }, + { + "prompt_type": "plain", + "instruction": ": Explain artifacts_azure_sas_token. : artifacts azure sas token config.toml: Azure Blob Store SAS token used for experiment artifact export" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Azure Blob Store SAS token: . : Set the artifacts azure sas token config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_azure_sas_token", + "output": "artifacts azure sas token config.toml: Azure Blob Store SAS token used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_azure_sas_token", + "output": "artifacts azure sas token config.toml: Azure Blob Store SAS token: Azure Blob Store SAS token used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts azure sas token", + "output": "artifacts azure sas token config.toml: Azure Blob Store SAS token: Azure Blob Store SAS token used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Azure Blob Store SAS token: ", + "output": "artifacts azure sas token 
config.toml: Azure Blob Store SAS token: Azure Blob Store SAS token used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting artifacts_azure_sas_token", + "output": "artifacts azure sas token config.toml: Azure Blob Store SAS token used for experiment artifact export" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting artifacts_azure_sas_token", + "output": "artifacts azure sas token config.toml: Azure Blob Store SAS token: Azure Blob Store SAS token used for experiment artifact export" + }, + { + "prompt_type": "plain", + "instruction": ": What does artifacts_git_user do? : artifacts git user config.toml: Git auth user" + }, + { + "prompt_type": "plain", + "instruction": ": Explain artifacts_git_user. : artifacts git user config.toml: Git auth user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_git_user", + "output": "artifacts git user config.toml: Git auth user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_git_user", + "output": "artifacts git user config.toml: Git auth user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts git user", + "output": "artifacts git user config.toml: Git auth user" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "artifacts git user config.toml: Git auth user" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting artifacts_git_user", + "output": "artifacts git user config.toml: Git auth user" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the 
expert setting artifacts_git_user", + "output": "artifacts git user config.toml: Git auth user" + }, + { + "prompt_type": "plain", + "instruction": ": What does artifacts_git_password do? : artifacts git password config.toml: Git auth password" + }, + { + "prompt_type": "plain", + "instruction": ": Explain artifacts_git_password. : artifacts git password config.toml: Git auth password" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_git_password", + "output": "artifacts git password config.toml: Git auth password" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_git_password", + "output": "artifacts git password config.toml: Git auth password" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts git password", + "output": "artifacts git password config.toml: Git auth password" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "artifacts git password config.toml: Git auth password" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting artifacts_git_password", + "output": "artifacts git password config.toml: Git auth password" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting artifacts_git_password", + "output": "artifacts git password config.toml: Git auth password" + }, + { + "prompt_type": "plain", + "instruction": ": What does artifacts_git_repo do? : artifacts git repo config.toml: Git repo where artifacts will be pushed upon and upload" + }, + { + "prompt_type": "plain", + "instruction": ": Explain artifacts_git_repo. 
: artifacts git repo config.toml: Git repo where artifacts will be pushed upon and upload" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_git_repo", + "output": "artifacts git repo config.toml: Git repo where artifacts will be pushed upon and upload" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_git_repo", + "output": "artifacts git repo config.toml: Git repo where artifacts will be pushed upon and upload" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts git repo", + "output": "artifacts git repo config.toml: Git repo where artifacts will be pushed upon and upload" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "artifacts git repo config.toml: Git repo where artifacts will be pushed upon and upload" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting artifacts_git_repo", + "output": "artifacts git repo config.toml: Git repo where artifacts will be pushed upon and upload" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting artifacts_git_repo", + "output": "artifacts git repo config.toml: Git repo where artifacts will be pushed upon and upload" + }, + { + "prompt_type": "plain", + "instruction": ": What does artifacts_git_branch do? : artifacts git branch config.toml: Git branch on the remote repo where artifacts are pushed" + }, + { + "prompt_type": "plain", + "instruction": ": Explain artifacts_git_branch. 
: artifacts git branch config.toml: Git branch on the remote repo where artifacts are pushed" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_git_branch", + "output": "artifacts git branch config.toml: Git branch on the remote repo where artifacts are pushed" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_git_branch", + "output": "artifacts git branch config.toml: Git branch on the remote repo where artifacts are pushed" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts git branch", + "output": "artifacts git branch config.toml: Git branch on the remote repo where artifacts are pushed" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "artifacts git branch config.toml: Git branch on the remote repo where artifacts are pushed" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting artifacts_git_branch", + "output": "artifacts git branch config.toml: Git branch on the remote repo where artifacts are pushed" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting artifacts_git_branch", + "output": "artifacts git branch config.toml: Git branch on the remote repo where artifacts are pushed" + }, + { + "prompt_type": "plain", + "instruction": ": What does artifacts_git_ssh_private_key_file_location do? : artifacts git ssh private key file location config.toml: File location for the ssh private key used for git authentication" + }, + { + "prompt_type": "plain", + "instruction": ": Explain artifacts_git_ssh_private_key_file_location. 
: artifacts git ssh private key file location config.toml: File location for the ssh private key used for git authentication" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_git_ssh_private_key_file_location", + "output": "artifacts git ssh private key file location config.toml: File location for the ssh private key used for git authentication" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts_git_ssh_private_key_file_location", + "output": "artifacts git ssh private key file location config.toml: File location for the ssh private key used for git authentication" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "artifacts git ssh private key file location", + "output": "artifacts git ssh private key file location config.toml: File location for the ssh private key used for git authentication" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "artifacts git ssh private key file location config.toml: File location for the ssh private key used for git authentication" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting artifacts_git_ssh_private_key_file_location", + "output": "artifacts git ssh private key file location config.toml: File location for the ssh private key used for git authentication" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting artifacts_git_ssh_private_key_file_location", + "output": "artifacts git ssh private key file location config.toml: File location for the ssh private key used for git authentication" + }, + { + "prompt_type": "plain", + "instruction": ": What does feature_store_endpoint_url do? 
: feature store endpoint url config.toml: Feature Store server endpoint URL" + }, + { + "prompt_type": "plain", + "instruction": ": Explain feature_store_endpoint_url. : feature store endpoint url config.toml: Feature Store server endpoint URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_store_endpoint_url", + "output": "feature store endpoint url config.toml: Feature Store server endpoint URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_store_endpoint_url", + "output": "feature store endpoint url config.toml: Feature Store server endpoint URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature store endpoint url", + "output": "feature store endpoint url config.toml: Feature Store server endpoint URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "feature store endpoint url config.toml: Feature Store server endpoint URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting feature_store_endpoint_url", + "output": "feature store endpoint url config.toml: Feature Store server endpoint URL" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting feature_store_endpoint_url", + "output": "feature store endpoint url config.toml: Feature Store server endpoint URL" + }, + { + "prompt_type": "plain", + "instruction": ": What does feature_store_enable_tls do? : feature store enable tls config.toml: Enable TLS communication between DAI and the Feature Store server" + }, + { + "prompt_type": "plain", + "instruction": ": Explain feature_store_enable_tls. 
: feature store enable tls config.toml: Enable TLS communication between DAI and the Feature Store server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_store_enable_tls", + "output": "feature store enable tls config.toml: Enable TLS communication between DAI and the Feature Store server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_store_enable_tls", + "output": "feature store enable tls config.toml: Enable TLS communication between DAI and the Feature Store server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature store enable tls", + "output": "feature store enable tls config.toml: Enable TLS communication between DAI and the Feature Store server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "feature store enable tls config.toml: Enable TLS communication between DAI and the Feature Store server" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting feature_store_enable_tls", + "output": "feature store enable tls config.toml: Enable TLS communication between DAI and the Feature Store server" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting feature_store_enable_tls", + "output": "feature store enable tls config.toml: Enable TLS communication between DAI and the Feature Store server" + }, + { + "prompt_type": "plain", + "instruction": ": What does feature_store_tls_cert_path do? : feature store tls cert path config.toml: Path to the client certificate to authenticate with the Feature Store server. This is only effective when feature_store_enable_tls=True." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain feature_store_tls_cert_path. : feature store tls cert path config.toml: Path to the client certificate to authenticate with the Feature Store server. This is only effective when feature_store_enable_tls=True." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_store_tls_cert_path", + "output": "feature store tls cert path config.toml: Path to the client certificate to authenticate with the Feature Store server. This is only effective when feature_store_enable_tls=True." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_store_tls_cert_path", + "output": "feature store tls cert path config.toml: Path to the client certificate to authenticate with the Feature Store server. This is only effective when feature_store_enable_tls=True." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature store tls cert path", + "output": "feature store tls cert path config.toml: Path to the client certificate to authenticate with the Feature Store server. This is only effective when feature_store_enable_tls=True." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "feature store tls cert path config.toml: Path to the client certificate to authenticate with the Feature Store server. This is only effective when feature_store_enable_tls=True." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting feature_store_tls_cert_path", + "output": "feature store tls cert path config.toml: Path to the client certificate to authenticate with the Feature Store server. This is only effective when feature_store_enable_tls=True." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting feature_store_tls_cert_path", + "output": "feature store tls cert path config.toml: Path to the client certificate to authenticate with the Feature Store server. This is only effective when feature_store_enable_tls=True." + }, + { + "prompt_type": "plain", + "instruction": ": What does feature_store_access_token_scopes do? : feature store access token scopes config.toml: A list of access token scopes used by the Feature Store connector to authenticate. (Space separate list)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain feature_store_access_token_scopes. : feature store access token scopes config.toml: A list of access token scopes used by the Feature Store connector to authenticate. (Space separate list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_store_access_token_scopes", + "output": "feature store access token scopes config.toml: A list of access token scopes used by the Feature Store connector to authenticate. (Space separate list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature_store_access_token_scopes", + "output": "feature store access token scopes config.toml: A list of access token scopes used by the Feature Store connector to authenticate. (Space separate list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "feature store access token scopes", + "output": "feature store access token scopes config.toml: A list of access token scopes used by the Feature Store connector to authenticate. 
(Space separate list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "feature store access token scopes config.toml: A list of access token scopes used by the Feature Store connector to authenticate. (Space separate list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting feature_store_access_token_scopes", + "output": "feature store access token scopes config.toml: A list of access token scopes used by the Feature Store connector to authenticate. (Space separate list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting feature_store_access_token_scopes", + "output": "feature store access token scopes config.toml: A list of access token scopes used by the Feature Store connector to authenticate. (Space separate list)" + }, + { + "prompt_type": "plain", + "instruction": ": What does deployment_aws_access_key_id do? : deployment aws access key id config.toml: Default AWS credentials to be used for scorer deployments." + }, + { + "prompt_type": "plain", + "instruction": ": Explain deployment_aws_access_key_id. : deployment aws access key id config.toml: Default AWS credentials to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "deployment_aws_access_key_id", + "output": "deployment aws access key id config.toml: Default AWS credentials to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "deployment_aws_access_key_id", + "output": "deployment aws access key id config.toml: Default AWS credentials to be used for scorer deployments." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "deployment aws access key id", + "output": "deployment aws access key id config.toml: Default AWS credentials to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "deployment aws access key id config.toml: Default AWS credentials to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting deployment_aws_access_key_id", + "output": "deployment aws access key id config.toml: Default AWS credentials to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting deployment_aws_access_key_id", + "output": "deployment aws access key id config.toml: Default AWS credentials to be used for scorer deployments." + }, + { + "prompt_type": "plain", + "instruction": ": What does deployment_aws_secret_access_key do? : deployment aws secret access key config.toml: Default AWS credentials to be used for scorer deployments." + }, + { + "prompt_type": "plain", + "instruction": ": Explain deployment_aws_secret_access_key. : deployment aws secret access key config.toml: Default AWS credentials to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "deployment_aws_secret_access_key", + "output": "deployment aws secret access key config.toml: Default AWS credentials to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "deployment_aws_secret_access_key", + "output": "deployment aws secret access key config.toml: Default AWS credentials to be used for scorer deployments." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "deployment aws secret access key", + "output": "deployment aws secret access key config.toml: Default AWS credentials to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "deployment aws secret access key config.toml: Default AWS credentials to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting deployment_aws_secret_access_key", + "output": "deployment aws secret access key config.toml: Default AWS credentials to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting deployment_aws_secret_access_key", + "output": "deployment aws secret access key config.toml: Default AWS credentials to be used for scorer deployments." + }, + { + "prompt_type": "plain", + "instruction": ": What does deployment_aws_bucket_name do? : deployment aws bucket name config.toml: AWS S3 bucket to be used for scorer deployments." + }, + { + "prompt_type": "plain", + "instruction": ": Explain deployment_aws_bucket_name. : deployment aws bucket name config.toml: AWS S3 bucket to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "deployment_aws_bucket_name", + "output": "deployment aws bucket name config.toml: AWS S3 bucket to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "deployment_aws_bucket_name", + "output": "deployment aws bucket name config.toml: AWS S3 bucket to be used for scorer deployments." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "deployment aws bucket name", + "output": "deployment aws bucket name config.toml: AWS S3 bucket to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "deployment aws bucket name config.toml: AWS S3 bucket to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting deployment_aws_bucket_name", + "output": "deployment aws bucket name config.toml: AWS S3 bucket to be used for scorer deployments." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting deployment_aws_bucket_name", + "output": "deployment aws bucket name config.toml: AWS S3 bucket to be used for scorer deployments." + }, + { + "prompt_type": "plain", + "instruction": ": What does triton_benchmark_runtime do? : triton benchmark runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers when performing 'Benchmark' operations for a deployment. Higher values result in more accurate performance numbers." + }, + { + "prompt_type": "plain", + "instruction": ": Explain triton_benchmark_runtime. : triton benchmark runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers when performing 'Benchmark' operations for a deployment. Higher values result in more accurate performance numbers." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_benchmark_runtime", + "output": "triton benchmark runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers when performing 'Benchmark' operations for a deployment. Higher values result in more accurate performance numbers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_benchmark_runtime", + "output": "triton benchmark runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers when performing 'Benchmark' operations for a deployment. Higher values result in more accurate performance numbers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton benchmark runtime", + "output": "triton benchmark runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers when performing 'Benchmark' operations for a deployment. Higher values result in more accurate performance numbers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "triton benchmark runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers when performing 'Benchmark' operations for a deployment. Higher values result in more accurate performance numbers." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting triton_benchmark_runtime", + "output": "triton benchmark runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers when performing 'Benchmark' operations for a deployment. Higher values result in more accurate performance numbers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting triton_benchmark_runtime", + "output": "triton benchmark runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers when performing 'Benchmark' operations for a deployment. Higher values result in more accurate performance numbers." + }, + { + "prompt_type": "plain", + "instruction": ": What does triton_quick_test_runtime do? : triton quick test runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers after loading up the deployment, per model. Higher values result in more accurate performance numbers." + }, + { + "prompt_type": "plain", + "instruction": ": Explain triton_quick_test_runtime. : triton quick test runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers after loading up the deployment, per model. Higher values result in more accurate performance numbers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_quick_test_runtime", + "output": "triton quick test runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers after loading up the deployment, per model. Higher values result in more accurate performance numbers." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_quick_test_runtime", + "output": "triton quick test runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers after loading up the deployment, per model. Higher values result in more accurate performance numbers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton quick test runtime", + "output": "triton quick test runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers after loading up the deployment, per model. Higher values result in more accurate performance numbers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "triton quick test runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers after loading up the deployment, per model. Higher values result in more accurate performance numbers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting triton_quick_test_runtime", + "output": "triton quick test runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers after loading up the deployment, per model. Higher values result in more accurate performance numbers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting triton_quick_test_runtime", + "output": "triton quick test runtime config.toml: Approximate upper limit of time for Triton to take to compute latency and throughput performance numbers after loading up the deployment, per model. 
Higher values result in more accurate performance numbers." + }, + { + "prompt_type": "plain", + "instruction": ": What does deploy_wizard_num_per_page do? : deploy wizard num per page config.toml: Number of Triton deployments to show per page of the Deploy Wizard" + }, + { + "prompt_type": "plain", + "instruction": ": Explain deploy_wizard_num_per_page. : deploy wizard num per page config.toml: Number of Triton deployments to show per page of the Deploy Wizard" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "deploy_wizard_num_per_page", + "output": "deploy wizard num per page config.toml: Number of Triton deployments to show per page of the Deploy Wizard" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "deploy_wizard_num_per_page", + "output": "deploy wizard num per page config.toml: Number of Triton deployments to show per page of the Deploy Wizard" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "deploy wizard num per page", + "output": "deploy wizard num per page config.toml: Number of Triton deployments to show per page of the Deploy Wizard" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "deploy wizard num per page config.toml: Number of Triton deployments to show per page of the Deploy Wizard" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting deploy_wizard_num_per_page", + "output": "deploy wizard num per page config.toml: Number of Triton deployments to show per page of the Deploy Wizard" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting deploy_wizard_num_per_page", + "output": "deploy wizard num per page config.toml: 
Number of Triton deployments to show per page of the Deploy Wizard" + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_config_overrides_in_expert_page do? : allow config overrides in expert page config.toml: Whether to allow user to change non-server toml parameters per experiment in expert page." + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_config_overrides_in_expert_page. : allow config overrides in expert page config.toml: Whether to allow user to change non-server toml parameters per experiment in expert page." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_config_overrides_in_expert_page", + "output": "allow config overrides in expert page config.toml: Whether to allow user to change non-server toml parameters per experiment in expert page." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_config_overrides_in_expert_page", + "output": "allow config overrides in expert page config.toml: Whether to allow user to change non-server toml parameters per experiment in expert page." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow config overrides in expert page", + "output": "allow config overrides in expert page config.toml: Whether to allow user to change non-server toml parameters per experiment in expert page." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "allow config overrides in expert page config.toml: Whether to allow user to change non-server toml parameters per experiment in expert page." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_config_overrides_in_expert_page", + "output": "allow config overrides in expert page config.toml: Whether to allow user to change non-server toml parameters per experiment in expert page." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allow_config_overrides_in_expert_page", + "output": "allow config overrides in expert page config.toml: Whether to allow user to change non-server toml parameters per experiment in expert page." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_cols_log_headtail do? : max cols log headtail config.toml: Maximum number of columns in each head and tail to log when ingesting data or running experiment on data." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_cols_log_headtail. : max cols log headtail config.toml: Maximum number of columns in each head and tail to log when ingesting data or running experiment on data." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cols_log_headtail", + "output": "max cols log headtail config.toml: Maximum number of columns in each head and tail to log when ingesting data or running experiment on data." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cols_log_headtail", + "output": "max cols log headtail config.toml: Maximum number of columns in each head and tail to log when ingesting data or running experiment on data." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max cols log headtail", + "output": "max cols log headtail config.toml: Maximum number of columns in each head and tail to log when ingesting data or running experiment on data." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max cols log headtail config.toml: Maximum number of columns in each head and tail to log when ingesting data or running experiment on data." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_cols_log_headtail", + "output": "max cols log headtail config.toml: Maximum number of columns in each head and tail to log when ingesting data or running experiment on data." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_cols_log_headtail", + "output": "max cols log headtail config.toml: Maximum number of columns in each head and tail to log when ingesting data or running experiment on data." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_cols_gui_headtail do? : max cols gui headtail config.toml: Maximum number of columns in each head and tail to show in GUI, useful when head or tail has all necessary columns, but too many for UI or web server to handle.-1 means no limit.A reasonable value is 500, after which web server or browser can become overloaded and use too much memory.Some values of column counts in UI may not show up correctly, and some dataset details functions may not work.To select (from GUI or client) any columns as being target, weight column, fold column, time column, time column groups, or dropped columns, the dataset should have those columns within the selected head or tail set of columns." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_cols_gui_headtail. 
: max cols gui headtail config.toml: Maximum number of columns in each head and tail to show in GUI, useful when head or tail has all necessary columns, but too many for UI or web server to handle.-1 means no limit.A reasonable value is 500, after which web server or browser can become overloaded and use too much memory.Some values of column counts in UI may not show up correctly, and some dataset details functions may not work.To select (from GUI or client) any columns as being target, weight column, fold column, time column, time column groups, or dropped columns, the dataset should have those columns within the selected head or tail set of columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cols_gui_headtail", + "output": "max cols gui headtail config.toml: Maximum number of columns in each head and tail to show in GUI, useful when head or tail has all necessary columns, but too many for UI or web server to handle.-1 means no limit.A reasonable value is 500, after which web server or browser can become overloaded and use too much memory.Some values of column counts in UI may not show up correctly, and some dataset details functions may not work.To select (from GUI or client) any columns as being target, weight column, fold column, time column, time column groups, or dropped columns, the dataset should have those columns within the selected head or tail set of columns." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_cols_gui_headtail", + "output": "max cols gui headtail config.toml: Maximum number of columns in each head and tail to show in GUI, useful when head or tail has all necessary columns, but too many for UI or web server to handle. -1 means no limit. A reasonable value is 500, after which web server or browser can become overloaded and use too much memory. Some values of column counts in UI may not show up correctly, and some dataset details functions may not work. To select (from GUI or client) any columns as being target, weight column, fold column, time column, time column groups, or dropped columns, the dataset should have those columns within the selected head or tail set of columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max cols gui headtail", + "output": "max cols gui headtail config.toml: Maximum number of columns in each head and tail to show in GUI, useful when head or tail has all necessary columns, but too many for UI or web server to handle. -1 means no limit. A reasonable value is 500, after which web server or browser can become overloaded and use too much memory. Some values of column counts in UI may not show up correctly, and some dataset details functions may not work. To select (from GUI or client) any columns as being target, weight column, fold column, time column, time column groups, or dropped columns, the dataset should have those columns within the selected head or tail set of columns." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max cols gui headtail config.toml: Maximum number of columns in each head and tail to show in GUI, useful when head or tail has all necessary columns, but too many for UI or web server to handle. -1 means no limit. A reasonable value is 500, after which web server or browser can become overloaded and use too much memory. Some values of column counts in UI may not show up correctly, and some dataset details functions may not work. To select (from GUI or client) any columns as being target, weight column, fold column, time column, time column groups, or dropped columns, the dataset should have those columns within the selected head or tail set of columns." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_cols_gui_headtail", + "output": "max cols gui headtail config.toml: Maximum number of columns in each head and tail to show in GUI, useful when head or tail has all necessary columns, but too many for UI or web server to handle. -1 means no limit. A reasonable value is 500, after which web server or browser can become overloaded and use too much memory. Some values of column counts in UI may not show up correctly, and some dataset details functions may not work. To select (from GUI or client) any columns as being target, weight column, fold column, time column, time column groups, or dropped columns, the dataset should have those columns within the selected head or tail set of columns." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_cols_gui_headtail", + "output": "max cols gui headtail config.toml: Maximum number of columns in each head and tail to show in GUI, useful when head or tail has all necessary columns, but too many for UI or web server to handle. -1 means no limit. A reasonable value is 500, after which web server or browser can become overloaded and use too much memory. Some values of column counts in UI may not show up correctly, and some dataset details functions may not work. To select (from GUI or client) any columns as being target, weight column, fold column, time column, time column groups, or dropped columns, the dataset should have those columns within the selected head or tail set of columns." + }, + { + "prompt_type": "plain", + "instruction": ": What does supported_file_types do? : supported file types config.toml: Supported file formats (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain supported_file_types. 
: supported file types config.toml: Supported file formats (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "supported_file_types", + "output": "supported file types config.toml: Supported file formats (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "supported_file_types", + "output": "supported file types config.toml: Supported file formats (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "supported file types", + "output": "supported file types config.toml: Supported file formats (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "supported file types config.toml: Supported file formats (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting supported_file_types", + "output": "supported file types config.toml: Supported file formats (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting supported_file_types", + "output": "supported file types config.toml: Supported file formats (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "plain", + "instruction": ": What does recipe_supported_file_types do? 
: recipe supported file types config.toml: Supported file formats of data recipe files (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain recipe_supported_file_types. : recipe supported file types config.toml: Supported file formats of data recipe files (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "recipe_supported_file_types", + "output": "recipe supported file types config.toml: Supported file formats of data recipe files (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "recipe_supported_file_types", + "output": "recipe supported file types config.toml: Supported file formats of data recipe files (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "recipe supported file types", + "output": "recipe supported file types config.toml: Supported file formats of data recipe files (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "recipe supported file types config.toml: Supported file formats of data recipe files (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting recipe_supported_file_types", + "output": "recipe supported file types config.toml: Supported file formats of data recipe files (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": 
"human_bot", + "instruction": "Provide a detailed explanation of the expert setting recipe_supported_file_types", + "output": "recipe supported file types config.toml: Supported file formats of data recipe files (file name endings must match for files to show up in file browser)" + }, + { + "prompt_type": "plain", + "instruction": ": What does list_files_without_extensions do? : list files without extensions config.toml: By default, only supported file types (based on the file extensions listed above) will be listed for import into DAI. Some data pipelines generate parquet files without any extensions. Enabling the below option will cause files without an extension to be listed in the file import dialog. DAI will import files without extensions as parquet files; if they cannot be imported, an error is generated " + }, + { + "prompt_type": "plain", + "instruction": ": Explain list_files_without_extensions. : list files without extensions config.toml: By default, only supported file types (based on the file extensions listed above) will be listed for import into DAI. Some data pipelines generate parquet files without any extensions. Enabling the below option will cause files without an extension to be listed in the file import dialog. DAI will import files without extensions as parquet files; if they cannot be imported, an error is generated " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "list_files_without_extensions", + "output": "list files without extensions config.toml: By default, only supported file types (based on the file extensions listed above) will be listed for import into DAI. Some data pipelines generate parquet files without any extensions. Enabling the below option will cause files without an extension to be listed in the file import dialog. 
DAI will import files without extensions as parquet files; if they cannot be imported, an error is generated " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "list_files_without_extensions", + "output": "list files without extensions config.toml: By default, only supported file types (based on the file extensions listed above) will be listed for import into DAI. Some data pipelines generate parquet files without any extensions. Enabling the below option will cause files without an extension to be listed in the file import dialog. DAI will import files without extensions as parquet files; if they cannot be imported, an error is generated " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "list files without extensions", + "output": "list files without extensions config.toml: By default, only supported file types (based on the file extensions listed above) will be listed for import into DAI. Some data pipelines generate parquet files without any extensions. Enabling the below option will cause files without an extension to be listed in the file import dialog. DAI will import files without extensions as parquet files; if they cannot be imported, an error is generated " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "list files without extensions config.toml: By default, only supported file types (based on the file extensions listed above) will be listed for import into DAI. Some data pipelines generate parquet files without any extensions. Enabling the below option will cause files without an extension to be listed in the file import dialog. 
DAI will import files without extensions as parquet files; if they cannot be imported, an error is generated " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting list_files_without_extensions", + "output": "list files without extensions config.toml: By default, only supported file types (based on the file extensions listed above) will be listed for import into DAI. Some data pipelines generate parquet files without any extensions. Enabling the below option will cause files without an extension to be listed in the file import dialog. DAI will import files without extensions as parquet files; if they cannot be imported, an error is generated " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting list_files_without_extensions", + "output": "list files without extensions config.toml: By default, only supported file types (based on the file extensions listed above) will be listed for import into DAI. Some data pipelines generate parquet files without any extensions. Enabling the below option will cause files without an extension to be listed in the file import dialog. DAI will import files without extensions as parquet files; if they cannot be imported, an error is generated " + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_localstorage do? : allow localstorage config.toml: Allow using browser localstorage, to improve UX." + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_localstorage. : allow localstorage config.toml: Allow using browser localstorage, to improve UX." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_localstorage", + "output": "allow localstorage config.toml: Allow using browser localstorage, to improve UX." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_localstorage", + "output": "allow localstorage config.toml: Allow using browser localstorage, to improve UX." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow localstorage", + "output": "allow localstorage config.toml: Allow using browser localstorage, to improve UX." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "allow localstorage config.toml: Allow using browser localstorage, to improve UX." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_localstorage", + "output": "allow localstorage config.toml: Allow using browser localstorage, to improve UX." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allow_localstorage", + "output": "allow localstorage config.toml: Allow using browser localstorage, to improve UX." + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_orig_cols_in_predictions do? : allow orig cols in predictions config.toml: Allow original dataset columns to be present in downloaded predictions CSV" + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_orig_cols_in_predictions. 
: allow orig cols in predictions config.toml: Allow original dataset columns to be present in downloaded predictions CSV" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_orig_cols_in_predictions", + "output": "allow orig cols in predictions config.toml: Allow original dataset columns to be present in downloaded predictions CSV" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_orig_cols_in_predictions", + "output": "allow orig cols in predictions config.toml: Allow original dataset columns to be present in downloaded predictions CSV" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow orig cols in predictions", + "output": "allow orig cols in predictions config.toml: Allow original dataset columns to be present in downloaded predictions CSV" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "allow orig cols in predictions config.toml: Allow original dataset columns to be present in downloaded predictions CSV" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_orig_cols_in_predictions", + "output": "allow orig cols in predictions config.toml: Allow original dataset columns to be present in downloaded predictions CSV" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allow_orig_cols_in_predictions", + "output": "allow orig cols in predictions config.toml: Allow original dataset columns to be present in downloaded predictions CSV" + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_form_autocomplete do? : allow form autocomplete config.toml: Allow the browser to store e.g. 
login credentials in login form (set to false for higher security)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_form_autocomplete. : allow form autocomplete config.toml: Allow the browser to store e.g. login credentials in login form (set to false for higher security)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_form_autocomplete", + "output": "allow form autocomplete config.toml: Allow the browser to store e.g. login credentials in login form (set to false for higher security)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_form_autocomplete", + "output": "allow form autocomplete config.toml: Allow the browser to store e.g. login credentials in login form (set to false for higher security)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow form autocomplete", + "output": "allow form autocomplete config.toml: Allow the browser to store e.g. login credentials in login form (set to false for higher security)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "allow form autocomplete config.toml: Allow the browser to store e.g. login credentials in login form (set to false for higher security)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_form_autocomplete", + "output": "allow form autocomplete config.toml: Allow the browser to store e.g. login credentials in login form (set to false for higher security)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allow_form_autocomplete", + "output": "allow form autocomplete config.toml: Allow the browser to store e.g. 
login credentials in login form (set to false for higher security)" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_projects do? : enable projects config.toml: Enable Projects workspace (alpha version, for evaluation)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_projects. : enable projects config.toml: Enable Projects workspace (alpha version, for evaluation)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable Projects workspace: . : Set the enable projects config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_projects", + "output": "enable projects config.toml: Enable Projects workspace (alpha version, for evaluation)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_projects", + "output": "enable projects config.toml: Enable Projects workspace: Enable Projects workspace (alpha version, for evaluation)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable projects", + "output": "enable projects config.toml: Enable Projects workspace: Enable Projects workspace (alpha version, for evaluation)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable Projects workspace: ", + "output": "enable projects config.toml: Enable Projects workspace: Enable Projects workspace (alpha version, for evaluation)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_projects", + "output": "enable projects config.toml: Enable Projects workspace (alpha version, for evaluation)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_projects", + "output": 
"enable projects config.toml: Enable Projects workspace: Enable Projects workspace (alpha version, for evaluation)" + }, + { + "prompt_type": "plain", + "instruction": ": What does app_language do? : app language config.toml: Default application language - options are 'en', 'ja', 'cn', 'ko'" + }, + { + "prompt_type": "plain", + "instruction": ": Explain app_language. : app language config.toml: Default application language - options are 'en', 'ja', 'cn', 'ko'" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "app_language", + "output": "app language config.toml: Default application language - options are 'en', 'ja', 'cn', 'ko'" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "app_language", + "output": "app language config.toml: Default application language - options are 'en', 'ja', 'cn', 'ko'" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "app language", + "output": "app language config.toml: Default application language - options are 'en', 'ja', 'cn', 'ko'" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "app language config.toml: Default application language - options are 'en', 'ja', 'cn', 'ko'" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting app_language", + "output": "app language config.toml: Default application language - options are 'en', 'ja', 'cn', 'ko'" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting app_language", + "output": "app language config.toml: Default application language - options are 'en', 'ja', 'cn', 'ko'" + }, + { + "prompt_type": "plain", + "instruction": ": What does disablelogout do? 
: disablelogout config.toml: If true, Logout button is not visible in the GUI." + }, + { + "prompt_type": "plain", + "instruction": ": Explain disablelogout. : disablelogout config.toml: If true, Logout button is not visible in the GUI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "disablelogout", + "output": "disablelogout config.toml: If true, Logout button is not visible in the GUI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "disablelogout", + "output": "disablelogout config.toml: If true, Logout button is not visible in the GUI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "disablelogout", + "output": "disablelogout config.toml: If true, Logout button is not visible in the GUI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "disablelogout config.toml: If true, Logout button is not visible in the GUI." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting disablelogout", + "output": "disablelogout config.toml: If true, Logout button is not visible in the GUI." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting disablelogout", + "output": "disablelogout config.toml: If true, Logout button is not visible in the GUI." + }, + { + "prompt_type": "plain", + "instruction": ": What does python_client_path do? : python client path config.toml: Local path to the location of the Driverless AI Python Client. If empty, will download from s3" + }, + { + "prompt_type": "plain", + "instruction": ": Explain python_client_path. : python client path config.toml: Local path to the location of the Driverless AI Python Client. 
If empty, will download from s3" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python_client_path", + "output": "python client path config.toml: Local path to the location of the Driverless AI Python Client. If empty, will download from s3" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python_client_path", + "output": "python client path config.toml: Local path to the location of the Driverless AI Python Client. If empty, will download from s3" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python client path", + "output": "python client path config.toml: Local path to the location of the Driverless AI Python Client. If empty, will download from s3" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "python client path config.toml: Local path to the location of the Driverless AI Python Client. If empty, will download from s3" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting python_client_path", + "output": "python client path config.toml: Local path to the location of the Driverless AI Python Client. If empty, will download from s3" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting python_client_path", + "output": "python client path config.toml: Local path to the location of the Driverless AI Python Client. If empty, will download from s3" + }, + { + "prompt_type": "plain", + "instruction": ": What does python_client_url do? : python client url config.toml: URL from where new python client WHL file is fetched." + }, + { + "prompt_type": "plain", + "instruction": ": Explain python_client_url. 
: python client url config.toml: URL from where new python client WHL file is fetched." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Python client wheel URL: . : Set the python client url config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python_client_url", + "output": "python client url config.toml: URL from where new python client WHL file is fetched." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python_client_url", + "output": "python client url config.toml: Python client wheel URL: URL from where new python client WHL file is fetched." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python client url", + "output": "python client url config.toml: Python client wheel URL: URL from where new python client WHL file is fetched." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Python client wheel URL: ", + "output": "python client url config.toml: Python client wheel URL: URL from where new python client WHL file is fetched." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting python_client_url", + "output": "python client url config.toml: URL from where new python client WHL file is fetched." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting python_client_url", + "output": "python client url config.toml: Python client wheel URL: URL from where new python client WHL file is fetched." + }, + { + "prompt_type": "plain", + "instruction": ": What does python_client_verify_integrity do? 
: python client verify integrity config.toml: If disabled, server won't verify if WHL package specified in `python_client_path` is valid DAI python client. Default True" + }, + { + "prompt_type": "plain", + "instruction": ": Explain python_client_verify_integrity. : python client verify integrity config.toml: If disabled, server won't verify if WHL package specified in `python_client_path` is valid DAI python client. Default True" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python_client_verify_integrity", + "output": "python client verify integrity config.toml: If disabled, server won't verify if WHL package specified in `python_client_path` is valid DAI python client. Default True" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python_client_verify_integrity", + "output": "python client verify integrity config.toml: If disabled, server won't verify if WHL package specified in `python_client_path` is valid DAI python client. Default True" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "python client verify integrity", + "output": "python client verify integrity config.toml: If disabled, server won't verify if WHL package specified in `python_client_path` is valid DAI python client. Default True" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "python client verify integrity config.toml: If disabled, server won't verify if WHL package specified in `python_client_path` is valid DAI python client. 
Default True" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting python_client_verify_integrity", + "output": "python client verify integrity config.toml: If disabled, server won't verify if WHL package specified in `python_client_path` is valid DAI python client. Default True" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting python_client_verify_integrity", + "output": "python client verify integrity config.toml: If disabled, server won't verify if WHL package specified in `python_client_path` is valid DAI python client. Default True" + }, + { + "prompt_type": "plain", + "instruction": ": What does gui_require_experiment_name do? : gui require experiment name config.toml: When enabled, a new experiment requires the user to specify an experiment name" + }, + { + "prompt_type": "plain", + "instruction": ": Explain gui_require_experiment_name. : gui require experiment name config.toml: When enabled, a new experiment requires the user to specify an experiment name" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Require experiment name: . 
: Set the gui require experiment name config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gui_require_experiment_name", + "output": "gui require experiment name config.toml: When enabled, a new experiment requires the user to specify an experiment name" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gui_require_experiment_name", + "output": "gui require experiment name config.toml: Require experiment name: When enabled, a new experiment requires the user to specify an experiment name" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gui require experiment name", + "output": "gui require experiment name config.toml: Require experiment name: When enabled, a new experiment requires the user to specify an experiment name" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Require experiment name: ", + "output": "gui require experiment name config.toml: Require experiment name: When enabled, a new experiment requires the user to specify an experiment name" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting gui_require_experiment_name", + "output": "gui require experiment name config.toml: When enabled, a new experiment requires the user to specify an experiment name" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting gui_require_experiment_name", + "output": "gui require experiment name config.toml: Require experiment name: When enabled, a new experiment requires the user to specify an experiment name" + }, + { + "prompt_type": "plain", + "instruction": ": What does gui_enable_deploy_button do? 
: gui enable deploy button config.toml: When disabled, Deploy option will be disabled on finished experiment page" + }, + { + "prompt_type": "plain", + "instruction": ": Explain gui_enable_deploy_button. : gui enable deploy button config.toml: When disabled, Deploy option will be disabled on finished experiment page" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable experiment deploy button: . : Set the gui enable deploy button config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gui_enable_deploy_button", + "output": "gui enable deploy button config.toml: When disabled, Deploy option will be disabled on finished experiment page" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gui_enable_deploy_button", + "output": "gui enable deploy button config.toml: Enable experiment deploy button: When disabled, Deploy option will be disabled on finished experiment page" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gui enable deploy button", + "output": "gui enable deploy button config.toml: Enable experiment deploy button: When disabled, Deploy option will be disabled on finished experiment page" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable experiment deploy button: ", + "output": "gui enable deploy button config.toml: Enable experiment deploy button: When disabled, Deploy option will be disabled on finished experiment page" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting gui_enable_deploy_button", + "output": "gui enable deploy button config.toml: When disabled, Deploy option will be disabled on finished experiment page" + }, + { + "prompt_type": "human_bot", 
+ "instruction": "Provide a detailed explanation of the expert setting gui_enable_deploy_button", + "output": "gui enable deploy button config.toml: Enable experiment deploy button: When disabled, Deploy option will be disabled on finished experiment page" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_gui_product_tour do? : enable gui product tour config.toml: Display experiment tour" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_gui_product_tour. : enable gui product tour config.toml: Display experiment tour" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: If False, GUI won't show experiment/product tour, when first time using DriverlessAI: . : Set the enable gui product tour config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_gui_product_tour", + "output": "enable gui product tour config.toml: Display experiment tour" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_gui_product_tour", + "output": "enable gui product tour config.toml: If False, GUI won't show experiment/product tour, when first time using DriverlessAI: Display experiment tour" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable gui product tour", + "output": "enable gui product tour config.toml: If False, GUI won't show experiment/product tour, when first time using DriverlessAI: Display experiment tour" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "If False, GUI won't show experiment/product tour, when first time using DriverlessAI: ", + "output": "enable gui product tour config.toml: If False, GUI won't show experiment/product tour, when first time using DriverlessAI: Display experiment 
tour" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_gui_product_tour", + "output": "enable gui product tour config.toml: Display experiment tour" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_gui_product_tour", + "output": "enable gui product tour config.toml: If False, GUI won't show experiment/product tour, when first time using DriverlessAI: Display experiment tour" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_dataset_downloading do? : enable dataset downloading config.toml: Whether user can download dataset as csv file" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_dataset_downloading. : enable dataset downloading config.toml: Whether user can download dataset as csv file" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_dataset_downloading", + "output": "enable dataset downloading config.toml: Whether user can download dataset as csv file" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_dataset_downloading", + "output": "enable dataset downloading config.toml: Whether user can download dataset as csv file" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable dataset downloading", + "output": "enable dataset downloading config.toml: Whether user can download dataset as csv file" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable dataset downloading config.toml: Whether user can download dataset as csv file" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting 
enable_dataset_downloading", + "output": "enable dataset downloading config.toml: Whether user can download dataset as csv file" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_dataset_downloading", + "output": "enable dataset downloading config.toml: Whether user can download dataset as csv file" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_experiment_export do? : enable experiment export config.toml: If enabled, user can export experiment as a Zip file" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_experiment_export. : enable experiment export config.toml: If enabled, user can export experiment as a Zip file" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_experiment_export", + "output": "enable experiment export config.toml: If enabled, user can export experiment as a Zip file" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_experiment_export", + "output": "enable experiment export config.toml: If enabled, user can export experiment as a Zip file" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable experiment export", + "output": "enable experiment export config.toml: If enabled, user can export experiment as a Zip file" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable experiment export config.toml: If enabled, user can export experiment as a Zip file" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_experiment_export", + "output": "enable experiment export config.toml: If enabled, user can export experiment as a Zip file" + }, + { + 
"prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_experiment_export", + "output": "enable experiment export config.toml: If enabled, user can export experiment as a Zip file" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_experiment_import do? : enable experiment import config.toml: If enabled, user can import experiments, exported as Zip files from DriverlessAI" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_experiment_import. : enable experiment import config.toml: If enabled, user can import experiments, exported as Zip files from DriverlessAI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_experiment_import", + "output": "enable experiment import config.toml: If enabled, user can import experiments, exported as Zip files from DriverlessAI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_experiment_import", + "output": "enable experiment import config.toml: If enabled, user can import experiments, exported as Zip files from DriverlessAI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable experiment import", + "output": "enable experiment import config.toml: If enabled, user can import experiments, exported as Zip files from DriverlessAI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable experiment import config.toml: If enabled, user can import experiments, exported as Zip files from DriverlessAI" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_experiment_import", + "output": "enable experiment import config.toml: If enabled, user can import 
experiments, exported as Zip files from DriverlessAI" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_experiment_import", + "output": "enable experiment import config.toml: If enabled, user can import experiments, exported as Zip files from DriverlessAI" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_experiment_wizard do? : enable experiment wizard config.toml: (EXPERIMENTAL) If enabled, user can launch experiment via new `Predict Wizard` options, which navigates to the new Nitro wizard." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_experiment_wizard. : enable experiment wizard config.toml: (EXPERIMENTAL) If enabled, user can launch experiment via new `Predict Wizard` options, which navigates to the new Nitro wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_experiment_wizard", + "output": "enable experiment wizard config.toml: (EXPERIMENTAL) If enabled, user can launch experiment via new `Predict Wizard` options, which navigates to the new Nitro wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_experiment_wizard", + "output": "enable experiment wizard config.toml: (EXPERIMENTAL) If enabled, user can launch experiment via new `Predict Wizard` options, which navigates to the new Nitro wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable experiment wizard", + "output": "enable experiment wizard config.toml: (EXPERIMENTAL) If enabled, user can launch experiment via new `Predict Wizard` options, which navigates to the new Nitro wizard." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable experiment wizard config.toml: (EXPERIMENTAL) If enabled, user can launch experiment via new `Predict Wizard` options, which navigates to the new Nitro wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_experiment_wizard", + "output": "enable experiment wizard config.toml: (EXPERIMENTAL) If enabled, user can launch experiment via new `Predict Wizard` options, which navigates to the new Nitro wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_experiment_wizard", + "output": "enable experiment wizard config.toml: (EXPERIMENTAL) If enabled, user can launch experiment via new `Predict Wizard` options, which navigates to the new Nitro wizard." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_join_wizard do? : enable join wizard config.toml: (EXPERIMENTAL) If enabled, user can do joins via new `Join Wizard` options, which navigates to the new Nitro wizard." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_join_wizard. : enable join wizard config.toml: (EXPERIMENTAL) If enabled, user can do joins via new `Join Wizard` options, which navigates to the new Nitro wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_join_wizard", + "output": "enable join wizard config.toml: (EXPERIMENTAL) If enabled, user can do joins via new `Join Wizard` options, which navigates to the new Nitro wizard." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_join_wizard", + "output": "enable join wizard config.toml: (EXPERIMENTAL) If enabled, user can do joins via new `Join Wizard` options, which navigates to the new Nitro wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable join wizard", + "output": "enable join wizard config.toml: (EXPERIMENTAL) If enabled, user can do joins via new `Join Wizard` options, which navigates to the new Nitro wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable join wizard config.toml: (EXPERIMENTAL) If enabled, user can do joins via new `Join Wizard` options, which navigates to the new Nitro wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_join_wizard", + "output": "enable join wizard config.toml: (EXPERIMENTAL) If enabled, user can do joins via new `Join Wizard` options, which navigates to the new Nitro wizard." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_join_wizard", + "output": "enable join wizard config.toml: (EXPERIMENTAL) If enabled, user can do joins via new `Join Wizard` options, which navigates to the new Nitro wizard." + }, + { + "prompt_type": "plain", + "instruction": ": What does hac_link_url do? : hac link url config.toml: URL address of the H2O AI link" + }, + { + "prompt_type": "plain", + "instruction": ": Explain hac_link_url. 
: hac link url config.toml: URL address of the H2O AI link" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hac_link_url", + "output": "hac link url config.toml: URL address of the H2O AI link" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hac_link_url", + "output": "hac link url config.toml: URL address of the H2O AI link" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hac link url", + "output": "hac link url config.toml: URL address of the H2O AI link" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hac link url config.toml: URL address of the H2O AI link" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hac_link_url", + "output": "hac link url config.toml: URL address of the H2O AI link" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hac_link_url", + "output": "hac link url config.toml: URL address of the H2O AI link" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_license_manager do? : enable license manager config.toml: Switches Driverless AI to use H2O.ai License Management Server to manage licenses/permission to use software" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_license_manager. 
: enable license manager config.toml: Switches Driverless AI to use H2O.ai License Management Server to manage licenses/permission to use software" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_license_manager", + "output": "enable license manager config.toml: Switches Driverless AI to use H2O.ai License Management Server to manage licenses/permission to use software" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_license_manager", + "output": "enable license manager config.toml: Switches Driverless AI to use H2O.ai License Management Server to manage licenses/permission to use software" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable license manager", + "output": "enable license manager config.toml: Switches Driverless AI to use H2O.ai License Management Server to manage licenses/permission to use software" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable license manager config.toml: Switches Driverless AI to use H2O.ai License Management Server to manage licenses/permission to use software" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_license_manager", + "output": "enable license manager config.toml: Switches Driverless AI to use H2O.ai License Management Server to manage licenses/permission to use software" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_license_manager", + "output": "enable license manager config.toml: Switches Driverless AI to use H2O.ai License Management Server to manage licenses/permission to use software" + }, + { + "prompt_type": "plain", + 
"instruction": ": What does license_manager_address do? : license manager address config.toml: Address at which to communicate with H2O.ai License Management Server. Requires above value, `enable_license_manager` set to True. Format: {http/https}://{ip address}:{port number} " + }, + { + "prompt_type": "plain", + "instruction": ": Explain license_manager_address. : license manager address config.toml: Address at which to communicate with H2O.ai License Management Server. Requires above value, `enable_license_manager` set to True. Format: {http/https}://{ip address}:{port number} " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license_manager_address", + "output": "license manager address config.toml: Address at which to communicate with H2O.ai License Management Server. Requires above value, `enable_license_manager` set to True. Format: {http/https}://{ip address}:{port number} " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license_manager_address", + "output": "license manager address config.toml: Address at which to communicate with H2O.ai License Management Server. Requires above value, `enable_license_manager` set to True. Format: {http/https}://{ip address}:{port number} " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license manager address", + "output": "license manager address config.toml: Address at which to communicate with H2O.ai License Management Server. Requires above value, `enable_license_manager` set to True. Format: {http/https}://{ip address}:{port number} " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "license manager address config.toml: Address at which to communicate with H2O.ai License Management Server. 
Requires above value, `enable_license_manager` set to True. Format: {http/https}://{ip address}:{port number} " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting license_manager_address", + "output": "license manager address config.toml: Address at which to communicate with H2O.ai License Management Server. Requires above value, `enable_license_manager` set to True. Format: {http/https}://{ip address}:{port number} " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting license_manager_address", + "output": "license manager address config.toml: Address at which to communicate with H2O.ai License Management Server. Requires above value, `enable_license_manager` set to True. Format: {http/https}://{ip address}:{port number} " + }, + { + "prompt_type": "plain", + "instruction": ": What does license_manager_project_name do? : license manager project name config.toml: Name of license manager project that Driverless AI will attempt to retrieve leases from. NOTE: requires an active license within the License Manager Server to function properly " + }, + { + "prompt_type": "plain", + "instruction": ": Explain license_manager_project_name. : license manager project name config.toml: Name of license manager project that Driverless AI will attempt to retrieve leases from. NOTE: requires an active license within the License Manager Server to function properly " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license_manager_project_name", + "output": "license manager project name config.toml: Name of license manager project that Driverless AI will attempt to retrieve leases from. 
NOTE: requires an active license within the License Manager Server to function properly " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license_manager_project_name", + "output": "license manager project name config.toml: Name of license manager project that Driverless AI will attempt to retrieve leases from. NOTE: requires an active license within the License Manager Server to function properly " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license manager project name", + "output": "license manager project name config.toml: Name of license manager project that Driverless AI will attempt to retrieve leases from. NOTE: requires an active license within the License Manager Server to function properly " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "license manager project name config.toml: Name of license manager project that Driverless AI will attempt to retrieve leases from. NOTE: requires an active license within the License Manager Server to function properly " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting license_manager_project_name", + "output": "license manager project name config.toml: Name of license manager project that Driverless AI will attempt to retrieve leases from. NOTE: requires an active license within the License Manager Server to function properly " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting license_manager_project_name", + "output": "license manager project name config.toml: Name of license manager project that Driverless AI will attempt to retrieve leases from. 
NOTE: requires an active license within the License Manager Server to function properly " + }, + { + "prompt_type": "plain", + "instruction": ": What does license_manager_lease_duration do? : license manager lease duration config.toml: Number of milliseconds a lease for users will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. Default: 3600000 (1 hour) = 1 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "plain", + "instruction": ": Explain license_manager_lease_duration. : license manager lease duration config.toml: Number of milliseconds a lease for users will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. Default: 3600000 (1 hour) = 1 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license_manager_lease_duration", + "output": "license manager lease duration config.toml: Number of milliseconds a lease for users will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. Default: 3600000 (1 hour) = 1 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license_manager_lease_duration", + "output": "license manager lease duration config.toml: Number of milliseconds a lease for users will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. 
Default: 3600000 (1 hour) = 1 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license manager lease duration", + "output": "license manager lease duration config.toml: Number of milliseconds a lease for users will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. Default: 3600000 (1 hour) = 1 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "license manager lease duration config.toml: Number of milliseconds a lease for users will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. Default: 3600000 (1 hour) = 1 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting license_manager_lease_duration", + "output": "license manager lease duration config.toml: Number of milliseconds a lease for users will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. Default: 3600000 (1 hour) = 1 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting license_manager_lease_duration", + "output": "license manager lease duration config.toml: Number of milliseconds a lease for users will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. Default: 3600000 (1 hour) = 1 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "plain", + "instruction": ": What does license_manager_worker_lease_duration do? 
: license manager worker lease duration config.toml: Number of milliseconds a lease for Driverless AI worker nodes will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. Default: 21600000 (6 hour) = 6 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "plain", + "instruction": ": Explain license_manager_worker_lease_duration. : license manager worker lease duration config.toml: Number of milliseconds a lease for Driverless AI worker nodes will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. Default: 21600000 (6 hour) = 6 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license_manager_worker_lease_duration", + "output": "license manager worker lease duration config.toml: Number of milliseconds a lease for Driverless AI worker nodes will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. Default: 21600000 (6 hour) = 6 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license_manager_worker_lease_duration", + "output": "license manager worker lease duration config.toml: Number of milliseconds a lease for Driverless AI worker nodes will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. 
Default: 21600000 (6 hour) = 6 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license manager worker lease duration", + "output": "license manager worker lease duration config.toml: Number of milliseconds a lease for Driverless AI worker nodes will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. Default: 21600000 (6 hour) = 6 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "license manager worker lease duration config.toml: Number of milliseconds a lease for Driverless AI worker nodes will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. Default: 21600000 (6 hour) = 6 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting license_manager_worker_lease_duration", + "output": "license manager worker lease duration config.toml: Number of milliseconds a lease for Driverless AI worker nodes will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. Default: 21600000 (6 hour) = 6 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting license_manager_worker_lease_duration", + "output": "license manager worker lease duration config.toml: Number of milliseconds a lease for Driverless AI worker nodes will be expected to last, if using the H2O.ai License Manager server, before the lease REQUIRES renewal. 
Default: 21600000 (6 hour) = 6 hour * 60 min / hour * 60 sec / min * 1000 milliseconds / sec " + }, + { + "prompt_type": "plain", + "instruction": ": What does license_manager_ssl_certs do? : license manager ssl certs config.toml: To be used only if License Manager server is started with HTTPS Accepts a boolean: true/false, or a path to a file/directory. Denotes whether or not to attempt SSL Certificate verification when making a request to the License Manager server. True: attempt ssl certificate verification, will fail if certificates are self signed False: skip ssl certificate verification. /path/to/cert/directory: load certificates in directory and use those for certificate verification Behaves in the same manner as python requests package: https://requests.readthedocs.io/en/latest/user/advanced/#ssl-cert-verification " + }, + { + "prompt_type": "plain", + "instruction": ": Explain license_manager_ssl_certs. : license manager ssl certs config.toml: To be used only if License Manager server is started with HTTPS Accepts a boolean: true/false, or a path to a file/directory. Denotes whether or not to attempt SSL Certificate verification when making a request to the License Manager server. True: attempt ssl certificate verification, will fail if certificates are self signed False: skip ssl certificate verification. /path/to/cert/directory: load certificates in directory and use those for certificate verification Behaves in the same manner as python requests package: https://requests.readthedocs.io/en/latest/user/advanced/#ssl-cert-verification " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license_manager_ssl_certs", + "output": "license manager ssl certs config.toml: To be used only if License Manager server is started with HTTPS Accepts a boolean: true/false, or a path to a file/directory.
Denotes whether or not to attempt SSL Certificate verification when making a request to the License Manager server. True: attempt ssl certificate verification, will fail if certificates are self signed False: skip ssl certificate verification. /path/to/cert/directory: load certificates in directory and use those for certificate verification Behaves in the same manner as python requests package: https://requests.readthedocs.io/en/latest/user/advanced/#ssl-cert-verification " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license_manager_ssl_certs", + "output": "license manager ssl certs config.toml: To be used only if License Manager server is started with HTTPS Accepts a boolean: true/false, or a path to a file/directory. Denotes whether or not to attempt SSL Certificate verification when making a request to the License Manager server. True: attempt ssl certificate verification, will fail if certificates are self signed False: skip ssl certificate verification. /path/to/cert/directory: load certificates in directory and use those for certificate verification Behaves in the same manner as python requests package: https://requests.readthedocs.io/en/latest/user/advanced/#ssl-cert-verification " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license manager ssl certs", + "output": "license manager ssl certs config.toml: To be used only if License Manager server is started with HTTPS Accepts a boolean: true/false, or a path to a file/directory. Denotes whether or not to attempt SSL Certificate verification when making a request to the License Manager server. True: attempt ssl certificate verification, will fail if certificates are self signed False: skip ssl certificate verification.
/path/to/cert/directory: load certificates in directory and use those for certificate verification Behaves in the same manner as python requests package: https://requests.readthedocs.io/en/latest/user/advanced/#ssl-cert-verification " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "license manager ssl certs config.toml: To be used only if License Manager server is started with HTTPS Accepts a boolean: true/false, or a path to a file/directory. Denotates whether or not to attempt SSL Certificate verification when making a request to the License Manager server. True: attempt ssl certificate verification, will fail if certificates are self signed False: skip ssl certificate verification. /path/to/cert/directory: load certificates in directory and use those for certificate verification Behaves in the same manner as python requests package: https://requests.readthedocs.io/en/latest/user/advanced/#ssl-cert-verification " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting license_manager_ssl_certs", + "output": "license manager ssl certs config.toml: To be used only if License Manager server is started with HTTPS Accepts a boolean: true/false, or a path to a file/directory. Denotates whether or not to attempt SSL Certificate verification when making a request to the License Manager server. True: attempt ssl certificate verification, will fail if certificates are self signed False: skip ssl certificate verification. 
/path/to/cert/directory: load certificates in directory and use those for certificate verification Behaves in the same manner as python requests package: https://requests.readthedocs.io/en/latest/user/advanced/#ssl-cert-verification " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting license_manager_ssl_certs", + "output": "license manager ssl certs config.toml: To be used only if License Manager server is started with HTTPS Accepts a boolean: true/false, or a path to a file/directory. Denotates whether or not to attempt SSL Certificate verification when making a request to the License Manager server. True: attempt ssl certificate verification, will fail if certificates are self signed False: skip ssl certificate verification. /path/to/cert/directory: load certificates in directory and use those for certificate verification Behaves in the same manner as python requests package: https://requests.readthedocs.io/en/latest/user/advanced/#ssl-cert-verification " + }, + { + "prompt_type": "plain", + "instruction": ": What does license_manager_worker_startup_timeout do? : license manager worker startup timeout config.toml: Amount of time that Driverless AI workers will keep retrying to startup and obtain a lease from the license manager before timing out. Time out will cause worker startup to fail. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain license_manager_worker_startup_timeout. : license manager worker startup timeout config.toml: Amount of time that Driverless AI workers will keep retrying to startup and obtain a lease from the license manager before timing out. Time out will cause worker startup to fail. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license_manager_worker_startup_timeout", + "output": "license manager worker startup timeout config.toml: Amount of time that Driverless AI workers will keep retrying to startup and obtain a lease from the license manager before timing out. Time out will cause worker startup to fail. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license_manager_worker_startup_timeout", + "output": "license manager worker startup timeout config.toml: Amount of time that Driverless AI workers will keep retrying to startup and obtain a lease from the license manager before timing out. Time out will cause worker startup to fail. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license manager worker startup timeout", + "output": "license manager worker startup timeout config.toml: Amount of time that Driverless AI workers will keep retrying to startup and obtain a lease from the license manager before timing out. Time out will cause worker startup to fail. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "license manager worker startup timeout config.toml: Amount of time that Driverless AI workers will keep retrying to startup and obtain a lease from the license manager before timing out. Time out will cause worker startup to fail. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting license_manager_worker_startup_timeout", + "output": "license manager worker startup timeout config.toml: Amount of time that Driverless AI workers will keep retrying to startup and obtain a lease from the license manager before timing out. 
Time out will cause worker startup to fail. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting license_manager_worker_startup_timeout", + "output": "license manager worker startup timeout config.toml: Amount of time that Driverless AI workers will keep retrying to startup and obtain a lease from the license manager before timing out. Time out will cause worker startup to fail. " + }, + { + "prompt_type": "plain", + "instruction": ": What does license_manager_dry_run_token do? : license manager dry run token config.toml: Emergency setting that will allow Driverless AI to run even if there is issues communicating with or obtaining leases from, the License Manager server. This is an encoded string that can be obtained from either the license manager ui or the logs of the license manager server. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain license_manager_dry_run_token. : license manager dry run token config.toml: Emergency setting that will allow Driverless AI to run even if there is issues communicating with or obtaining leases from, the License Manager server. This is an encoded string that can be obtained from either the license manager ui or the logs of the license manager server. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license_manager_dry_run_token", + "output": "license manager dry run token config.toml: Emergency setting that will allow Driverless AI to run even if there is issues communicating with or obtaining leases from, the License Manager server. This is an encoded string that can be obtained from either the license manager ui or the logs of the license manager server. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license_manager_dry_run_token", + "output": "license manager dry run token config.toml: Emergency setting that will allow Driverless AI to run even if there is issues communicating with or obtaining leases from, the License Manager server. This is an encoded string that can be obtained from either the license manager ui or the logs of the license manager server. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "license manager dry run token", + "output": "license manager dry run token config.toml: Emergency setting that will allow Driverless AI to run even if there is issues communicating with or obtaining leases from, the License Manager server. This is an encoded string that can be obtained from either the license manager ui or the logs of the license manager server. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "license manager dry run token config.toml: Emergency setting that will allow Driverless AI to run even if there is issues communicating with or obtaining leases from, the License Manager server. This is an encoded string that can be obtained from either the license manager ui or the logs of the license manager server. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting license_manager_dry_run_token", + "output": "license manager dry run token config.toml: Emergency setting that will allow Driverless AI to run even if there is issues communicating with or obtaining leases from, the License Manager server. This is an encoded string that can be obtained from either the license manager ui or the logs of the license manager server. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting license_manager_dry_run_token", + "output": "license manager dry run token config.toml: Emergency setting that will allow Driverless AI to run even if there is issues communicating with or obtaining leases from, the License Manager server. This is an encoded string that can be obtained from either the license manager ui or the logs of the license manager server. " + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_lime_method do? : mli lime method config.toml: Choose LIME method to be used for creation of surrogate models." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_lime_method. : mli lime method config.toml: Choose LIME method to be used for creation of surrogate models." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: LIME method: . : Set the mli lime method config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_lime_method", + "output": "mli lime method config.toml: Choose LIME method to be used for creation of surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_lime_method", + "output": "mli lime method config.toml: LIME method: Choose LIME method to be used for creation of surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli lime method", + "output": "mli lime method config.toml: LIME method: Choose LIME method to be used for creation of surrogate models." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "LIME method: ", + "output": "mli lime method config.toml: LIME method: Choose LIME method to be used for creation of surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_lime_method", + "output": "mli lime method config.toml: Choose LIME method to be used for creation of surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_lime_method", + "output": "mli lime method config.toml: LIME method: Choose LIME method to be used for creation of surrogate models." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_use_raw_features do? : mli use raw features config.toml: Choose whether surrogate models should be built for original or transformed features." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_use_raw_features. : mli use raw features config.toml: Choose whether surrogate models should be built for original or transformed features." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Use original features for surrogate models: . : Set the mli use raw features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_use_raw_features", + "output": "mli use raw features config.toml: Choose whether surrogate models should be built for original or transformed features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_use_raw_features", + "output": "mli use raw features config.toml: Use original features for surrogate models: Choose whether surrogate models should be built for original or transformed features." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli use raw features", + "output": "mli use raw features config.toml: Use original features for surrogate models: Choose whether surrogate models should be built for original or transformed features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Use original features for surrogate models: ", + "output": "mli use raw features config.toml: Use original features for surrogate models: Choose whether surrogate models should be built for original or transformed features." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_use_raw_features", + "output": "mli use raw features config.toml: Choose whether surrogate models should be built for original or transformed features." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_use_raw_features", + "output": "mli use raw features config.toml: Use original features for surrogate models: Choose whether surrogate models should be built for original or transformed features." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_ts_use_raw_features do? : mli ts use raw features config.toml: Choose whether time series based surrogate models should be built for original features." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_ts_use_raw_features. : mli ts use raw features config.toml: Choose whether time series based surrogate models should be built for original features." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Use original features for time series based surrogate models: . 
: Set the mli ts use raw features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_ts_use_raw_features", + "output": "mli ts use raw features config.toml: Choose whether time series based surrogate models should be built for original features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_ts_use_raw_features", + "output": "mli ts use raw features config.toml: Use original features for time series based surrogate models: Choose whether time series based surrogate models should be built for original features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli ts use raw features", + "output": "mli ts use raw features config.toml: Use original features for time series based surrogate models: Choose whether time series based surrogate models should be built for original features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Use original features for time series based surrogate models: ", + "output": "mli ts use raw features config.toml: Use original features for time series based surrogate models: Choose whether time series based surrogate models should be built for original features." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_ts_use_raw_features", + "output": "mli ts use raw features config.toml: Choose whether time series based surrogate models should be built for original features." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_ts_use_raw_features", + "output": "mli ts use raw features config.toml: Use original features for time series based surrogate models: Choose whether time series based surrogate models should be built for original features." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_sample do? : mli sample config.toml: Choose whether to run all explainers on the sampled dataset." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_sample. : mli sample config.toml: Choose whether to run all explainers on the sampled dataset." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Sample all explainers: . : Set the mli sample config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sample", + "output": "mli sample config.toml: Choose whether to run all explainers on the sampled dataset." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sample", + "output": "mli sample config.toml: Sample all explainers: Choose whether to run all explainers on the sampled dataset." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli sample", + "output": "mli sample config.toml: Sample all explainers: Choose whether to run all explainers on the sampled dataset." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Sample all explainers: ", + "output": "mli sample config.toml: Sample all explainers: Choose whether to run all explainers on the sampled dataset." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_sample", + "output": "mli sample config.toml: Choose whether to run all explainers on the sampled dataset." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_sample", + "output": "mli sample config.toml: Sample all explainers: Choose whether to run all explainers on the sampled dataset." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_vars_to_pdp do? : mli vars to pdp config.toml: Set maximum number of features for which to build Surrogate Partial Dependence Plot. Use -1 to calculate Surrogate Partial Dependence Plot for all features." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_vars_to_pdp. : mli vars to pdp config.toml: Set maximum number of features for which to build Surrogate Partial Dependence Plot. Use -1 to calculate Surrogate Partial Dependence Plot for all features." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of features for Surrogate Partial Dependence Plot. Set to -1 to use all features.: . : Set the mli vars to pdp config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_vars_to_pdp", + "output": "mli vars to pdp config.toml: Set maximum number of features for which to build Surrogate Partial Dependence Plot. Use -1 to calculate Surrogate Partial Dependence Plot for all features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_vars_to_pdp", + "output": "mli vars to pdp config.toml: Number of features for Surrogate Partial Dependence Plot. Set to -1 to use all features.: Set maximum number of features for which to build Surrogate Partial Dependence Plot. Use -1 to calculate Surrogate Partial Dependence Plot for all features." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli vars to pdp", + "output": "mli vars to pdp config.toml: Number of features for Surrogate Partial Dependence Plot. Set to -1 to use all features.: Set maximum number of features for which to build Surrogate Partial Dependence Plot. Use -1 to calculate Surrogate Partial Dependence Plot for all features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of features for Surrogate Partial Dependence Plot. Set to -1 to use all features.: ", + "output": "mli vars to pdp config.toml: Number of features for Surrogate Partial Dependence Plot. Set to -1 to use all features.: Set maximum number of features for which to build Surrogate Partial Dependence Plot. Use -1 to calculate Surrogate Partial Dependence Plot for all features." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_vars_to_pdp", + "output": "mli vars to pdp config.toml: Set maximum number of features for which to build Surrogate Partial Dependence Plot. Use -1 to calculate Surrogate Partial Dependence Plot for all features." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_vars_to_pdp", + "output": "mli vars to pdp config.toml: Number of features for Surrogate Partial Dependence Plot. Set to -1 to use all features.: Set maximum number of features for which to build Surrogate Partial Dependence Plot. Use -1 to calculate Surrogate Partial Dependence Plot for all features." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nfolds do? : mli nfolds config.toml: Set the number of cross-validation folds for surrogate models." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nfolds. 
: mli nfolds config.toml: Set the number of cross-validation folds for surrogate models." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Cross-validation folds for surrogate models: . : Set the mli nfolds config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nfolds", + "output": "mli nfolds config.toml: Set the number of cross-validation folds for surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nfolds", + "output": "mli nfolds config.toml: Cross-validation folds for surrogate models: Set the number of cross-validation folds for surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nfolds", + "output": "mli nfolds config.toml: Cross-validation folds for surrogate models: Set the number of cross-validation folds for surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Cross-validation folds for surrogate models: ", + "output": "mli nfolds config.toml: Cross-validation folds for surrogate models: Set the number of cross-validation folds for surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nfolds", + "output": "mli nfolds config.toml: Set the number of cross-validation folds for surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nfolds", + "output": "mli nfolds config.toml: Cross-validation folds for surrogate models: Set the number of cross-validation folds for surrogate models." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_qbin_count do? 
: mli qbin count config.toml: Set the number of columns to bin in case of quantile binning." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_qbin_count. : mli qbin count config.toml: Set the number of columns to bin in case of quantile binning." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of columns to bin for surrogate models: . : Set the mli qbin count config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_qbin_count", + "output": "mli qbin count config.toml: Set the number of columns to bin in case of quantile binning." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_qbin_count", + "output": "mli qbin count config.toml: Number of columns to bin for surrogate models: Set the number of columns to bin in case of quantile binning." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli qbin count", + "output": "mli qbin count config.toml: Number of columns to bin for surrogate models: Set the number of columns to bin in case of quantile binning." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of columns to bin for surrogate models: ", + "output": "mli qbin count config.toml: Number of columns to bin for surrogate models: Set the number of columns to bin in case of quantile binning." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_qbin_count", + "output": "mli qbin count config.toml: Set the number of columns to bin in case of quantile binning." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_qbin_count", + "output": "mli qbin count config.toml: Number of columns to bin for surrogate models: Set the number of columns to bin in case of quantile binning." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_mli_nthreads do? : h2o mli nthreads config.toml: Number of threads for H2O instance for use by MLI." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_mli_nthreads. : h2o mli nthreads config.toml: Number of threads for H2O instance for use by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_mli_nthreads", + "output": "h2o mli nthreads config.toml: Number of threads for H2O instance for use by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_mli_nthreads", + "output": "h2o mli nthreads config.toml: Number of threads for H2O instance for use by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o mli nthreads", + "output": "h2o mli nthreads config.toml: Number of threads for H2O instance for use by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o mli nthreads config.toml: Number of threads for H2O instance for use by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_mli_nthreads", + "output": "h2o mli nthreads config.toml: Number of threads for H2O instance for use by MLI." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_mli_nthreads", + "output": "h2o mli nthreads config.toml: Number of threads for H2O instance for use by MLI." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_enable_mojo_scorer do? : mli enable mojo scorer config.toml: Use this option to disable MOJO scoring pipeline. Scoring pipeline is chosen automatically (from MOJO and Python pipelines) by default. In case of certain models MOJO vs. Python choice can impact pipeline performance and robustness." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_enable_mojo_scorer. : mli enable mojo scorer config.toml: Use this option to disable MOJO scoring pipeline. Scoring pipeline is chosen automatically (from MOJO and Python pipelines) by default. In case of certain models MOJO vs. Python choice can impact pipeline performance and robustness." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Allow use of MOJO scoring pipeline: . : Set the mli enable mojo scorer config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_enable_mojo_scorer", + "output": "mli enable mojo scorer config.toml: Use this option to disable MOJO scoring pipeline. Scoring pipeline is chosen automatically (from MOJO and Python pipelines) by default. In case of certain models MOJO vs. Python choice can impact pipeline performance and robustness." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_enable_mojo_scorer", + "output": "mli enable mojo scorer config.toml: Allow use of MOJO scoring pipeline: Use this option to disable MOJO scoring pipeline. Scoring pipeline is chosen automatically (from MOJO and Python pipelines) by default. In case of certain models MOJO vs. 
Python choice can impact pipeline performance and robustness." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli enable mojo scorer", + "output": "mli enable mojo scorer config.toml: Allow use of MOJO scoring pipeline: Use this option to disable MOJO scoring pipeline. Scoring pipeline is chosen automatically (from MOJO and Python pipelines) by default. In case of certain models MOJO vs. Python choice can impact pipeline performance and robustness." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Allow use of MOJO scoring pipeline: ", + "output": "mli enable mojo scorer config.toml: Allow use of MOJO scoring pipeline: Use this option to disable MOJO scoring pipeline. Scoring pipeline is chosen automatically (from MOJO and Python pipelines) by default. In case of certain models MOJO vs. Python choice can impact pipeline performance and robustness." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_enable_mojo_scorer", + "output": "mli enable mojo scorer config.toml: Use this option to disable MOJO scoring pipeline. Scoring pipeline is chosen automatically (from MOJO and Python pipelines) by default. In case of certain models MOJO vs. Python choice can impact pipeline performance and robustness." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_enable_mojo_scorer", + "output": "mli enable mojo scorer config.toml: Allow use of MOJO scoring pipeline: Use this option to disable MOJO scoring pipeline. Scoring pipeline is chosen automatically (from MOJO and Python pipelines) by default. In case of certain models MOJO vs. Python choice can impact pipeline performance and robustness." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_sample_above_for_scoring do? 
: mli sample above for scoring config.toml: When number of rows are above this limit sample for MLI for scoring UI data." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_sample_above_for_scoring. : mli sample above for scoring config.toml: When number of rows are above this limit sample for MLI for scoring UI data." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sample_above_for_scoring", + "output": "mli sample above for scoring config.toml: When number of rows are above this limit sample for MLI for scoring UI data." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sample_above_for_scoring", + "output": "mli sample above for scoring config.toml: When number of rows are above this limit sample for MLI for scoring UI data." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli sample above for scoring", + "output": "mli sample above for scoring config.toml: When number of rows are above this limit sample for MLI for scoring UI data." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli sample above for scoring config.toml: When number of rows are above this limit sample for MLI for scoring UI data." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_sample_above_for_scoring", + "output": "mli sample above for scoring config.toml: When number of rows are above this limit sample for MLI for scoring UI data." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_sample_above_for_scoring", + "output": "mli sample above for scoring config.toml: When number of rows are above this limit sample for MLI for scoring UI data." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_sample_above_for_training do? : mli sample above for training config.toml: When number of rows are above this limit sample for MLI for training surrogate models." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_sample_above_for_training. : mli sample above for training config.toml: When number of rows are above this limit sample for MLI for training surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sample_above_for_training", + "output": "mli sample above for training config.toml: When number of rows are above this limit sample for MLI for training surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sample_above_for_training", + "output": "mli sample above for training config.toml: When number of rows are above this limit sample for MLI for training surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli sample above for training", + "output": "mli sample above for training config.toml: When number of rows are above this limit sample for MLI for training surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli sample above for training config.toml: When number of rows are above this limit sample for MLI for training surrogate models." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_sample_above_for_training", + "output": "mli sample above for training config.toml: When number of rows are above this limit sample for MLI for training surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_sample_above_for_training", + "output": "mli sample above for training config.toml: When number of rows are above this limit sample for MLI for training surrogate models." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_sample_size do? : mli sample size config.toml: The sample size, number of rows, used for MLI surrogate models." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_sample_size. : mli sample size config.toml: The sample size, number of rows, used for MLI surrogate models." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Sample size for surrogate models: . : Set the mli sample size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sample_size", + "output": "mli sample size config.toml: The sample size, number of rows, used for MLI surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sample_size", + "output": "mli sample size config.toml: Sample size for surrogate models: The sample size, number of rows, used for MLI surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli sample size", + "output": "mli sample size config.toml: Sample size for surrogate models: The sample size, number of rows, used for MLI surrogate models." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Sample size for surrogate models: ", + "output": "mli sample size config.toml: Sample size for surrogate models: The sample size, number of rows, used for MLI surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_sample_size", + "output": "mli sample size config.toml: The sample size, number of rows, used for MLI surrogate models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_sample_size", + "output": "mli sample size config.toml: Sample size for surrogate models: The sample size, number of rows, used for MLI surrogate models." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_num_quantiles do? : mli num quantiles config.toml: Number of bins for quantile binning." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_num_quantiles. : mli num quantiles config.toml: Number of bins for quantile binning." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of bins for quantile binning: . : Set the mli num quantiles config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_num_quantiles", + "output": "mli num quantiles config.toml: Number of bins for quantile binning." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_num_quantiles", + "output": "mli num quantiles config.toml: Number of bins for quantile binning: Number of bins for quantile binning." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli num quantiles", + "output": "mli num quantiles config.toml: Number of bins for quantile binning: Number of bins for quantile binning." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of bins for quantile binning: ", + "output": "mli num quantiles config.toml: Number of bins for quantile binning: Number of bins for quantile binning." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_num_quantiles", + "output": "mli num quantiles config.toml: Number of bins for quantile binning." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_num_quantiles", + "output": "mli num quantiles config.toml: Number of bins for quantile binning: Number of bins for quantile binning." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_drf_num_trees do? : mli drf num trees config.toml: Number of trees for Random Forest surrogate model." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_drf_num_trees. : mli drf num trees config.toml: Number of trees for Random Forest surrogate model." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of trees for Random Forest surrogate model: . : Set the mli drf num trees config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_drf_num_trees", + "output": "mli drf num trees config.toml: Number of trees for Random Forest surrogate model." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_drf_num_trees", + "output": "mli drf num trees config.toml: Number of trees for Random Forest surrogate model: Number of trees for Random Forest surrogate model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli drf num trees", + "output": "mli drf num trees config.toml: Number of trees for Random Forest surrogate model: Number of trees for Random Forest surrogate model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of trees for Random Forest surrogate model: ", + "output": "mli drf num trees config.toml: Number of trees for Random Forest surrogate model: Number of trees for Random Forest surrogate model." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_drf_num_trees", + "output": "mli drf num trees config.toml: Number of trees for Random Forest surrogate model." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_drf_num_trees", + "output": "mli drf num trees config.toml: Number of trees for Random Forest surrogate model: Number of trees for Random Forest surrogate model." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_fast_approx do? : mli fast approx config.toml: Speed up predictions with a fast approximation (can reduce the number of trees or cross-validation folds)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_fast_approx. : mli fast approx config.toml: Speed up predictions with a fast approximation (can reduce the number of trees or cross-validation folds)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Speed up predictions with a fast approximation: . 
: Set the mli fast approx config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_fast_approx", + "output": "mli fast approx config.toml: Speed up predictions with a fast approximation (can reduce the number of trees or cross-validation folds)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_fast_approx", + "output": "mli fast approx config.toml: Speed up predictions with a fast approximation: Speed up predictions with a fast approximation (can reduce the number of trees or cross-validation folds)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli fast approx", + "output": "mli fast approx config.toml: Speed up predictions with a fast approximation: Speed up predictions with a fast approximation (can reduce the number of trees or cross-validation folds)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Speed up predictions with a fast approximation: ", + "output": "mli fast approx config.toml: Speed up predictions with a fast approximation: Speed up predictions with a fast approximation (can reduce the number of trees or cross-validation folds)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_fast_approx", + "output": "mli fast approx config.toml: Speed up predictions with a fast approximation (can reduce the number of trees or cross-validation folds)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_fast_approx", + "output": "mli fast approx config.toml: Speed up predictions with a fast approximation: Speed up predictions with a fast approximation (can reduce the number of trees or cross-validation folds)." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does mli_interpreter_status_cache_size do? : mli interpreter status cache size config.toml: Maximum number of interpreters status cache entries." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_interpreter_status_cache_size. : mli interpreter status cache size config.toml: Maximum number of interpreters status cache entries." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_interpreter_status_cache_size", + "output": "mli interpreter status cache size config.toml: Maximum number of interpreters status cache entries." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_interpreter_status_cache_size", + "output": "mli interpreter status cache size config.toml: Maximum number of interpreters status cache entries." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli interpreter status cache size", + "output": "mli interpreter status cache size config.toml: Maximum number of interpreters status cache entries." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli interpreter status cache size config.toml: Maximum number of interpreters status cache entries." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_interpreter_status_cache_size", + "output": "mli interpreter status cache size config.toml: Maximum number of interpreters status cache entries." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_interpreter_status_cache_size", + "output": "mli interpreter status cache size config.toml: Maximum number of interpreters status cache entries." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does mli_drf_max_depth do? : mli drf max depth config.toml: Max depth for Random Forest surrogate model." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_drf_max_depth. : mli drf max depth config.toml: Max depth for Random Forest surrogate model." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max depth for Random Forest surrogate model: . : Set the mli drf max depth config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_drf_max_depth", + "output": "mli drf max depth config.toml: Max depth for Random Forest surrogate model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_drf_max_depth", + "output": "mli drf max depth config.toml: Max depth for Random Forest surrogate model: Max depth for Random Forest surrogate model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli drf max depth", + "output": "mli drf max depth config.toml: Max depth for Random Forest surrogate model: Max depth for Random Forest surrogate model." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max depth for Random Forest surrogate model: ", + "output": "mli drf max depth config.toml: Max depth for Random Forest surrogate model: Max depth for Random Forest surrogate model." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_drf_max_depth", + "output": "mli drf max depth config.toml: Max depth for Random Forest surrogate model." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_drf_max_depth", + "output": "mli drf max depth config.toml: Max depth for Random Forest surrogate model: Max depth for Random Forest surrogate model." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_sample_training do? : mli sample training config.toml: not only sample training, but also sample scoring." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_sample_training. : mli sample training config.toml: not only sample training, but also sample scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sample_training", + "output": "mli sample training config.toml: not only sample training, but also sample scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sample_training", + "output": "mli sample training config.toml: not only sample training, but also sample scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli sample training", + "output": "mli sample training config.toml: not only sample training, but also sample scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli sample training config.toml: not only sample training, but also sample scoring." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_sample_training", + "output": "mli sample training config.toml: not only sample training, but also sample scoring." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_sample_training", + "output": "mli sample training config.toml: not only sample training, but also sample scoring." + }, + { + "prompt_type": "plain", + "instruction": ": What does klime_lambda do? : klime lambda config.toml: Regularization strength for k-LIME GLM's." + }, + { + "prompt_type": "plain", + "instruction": ": Explain klime_lambda. : klime lambda config.toml: Regularization strength for k-LIME GLM's." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Regularization strength for k-LIME GLM's: . : Set the klime lambda config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "klime_lambda", + "output": "klime lambda config.toml: Regularization strength for k-LIME GLM's." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "klime_lambda", + "output": "klime lambda config.toml: Regularization strength for k-LIME GLM's: Regularization strength for k-LIME GLM's." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "klime lambda", + "output": "klime lambda config.toml: Regularization strength for k-LIME GLM's: Regularization strength for k-LIME GLM's." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Regularization strength for k-LIME GLM's: ", + "output": "klime lambda config.toml: Regularization strength for k-LIME GLM's: Regularization strength for k-LIME GLM's." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting klime_lambda", + "output": "klime lambda config.toml: Regularization strength for k-LIME GLM's." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting klime_lambda", + "output": "klime lambda config.toml: Regularization strength for k-LIME GLM's: Regularization strength for k-LIME GLM's." + }, + { + "prompt_type": "plain", + "instruction": ": What does klime_alpha do? : klime alpha config.toml: Regularization distribution between L1 and L2 for k-LIME GLM's." + }, + { + "prompt_type": "plain", + "instruction": ": Explain klime_alpha. : klime alpha config.toml: Regularization distribution between L1 and L2 for k-LIME GLM's." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Regularization distribution between L1 and L2 for k-LIME GLM's: . : Set the klime alpha config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "klime_alpha", + "output": "klime alpha config.toml: Regularization distribution between L1 and L2 for k-LIME GLM's." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "klime_alpha", + "output": "klime alpha config.toml: Regularization distribution between L1 and L2 for k-LIME GLM's: Regularization distribution between L1 and L2 for k-LIME GLM's." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "klime alpha", + "output": "klime alpha config.toml: Regularization distribution between L1 and L2 for k-LIME GLM's: Regularization distribution between L1 and L2 for k-LIME GLM's." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Regularization distribution between L1 and L2 for k-LIME GLM's: ", + "output": "klime alpha config.toml: Regularization distribution between L1 and L2 for k-LIME GLM's: Regularization distribution between L1 and L2 for k-LIME GLM's." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting klime_alpha", + "output": "klime alpha config.toml: Regularization distribution between L1 and L2 for k-LIME GLM's." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting klime_alpha", + "output": "klime alpha config.toml: Regularization distribution between L1 and L2 for k-LIME GLM's: Regularization distribution between L1 and L2 for k-LIME GLM's." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_max_numeric_enum_cardinality do? : mli max numeric enum cardinality config.toml: Max cardinality for numeric variables in surrogate models to be considered categorical." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_max_numeric_enum_cardinality. : mli max numeric enum cardinality config.toml: Max cardinality for numeric variables in surrogate models to be considered categorical." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max cardinality for numeric variables in surrogate models to be considered categorical: . : Set the mli max numeric enum cardinality config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_max_numeric_enum_cardinality", + "output": "mli max numeric enum cardinality config.toml: Max cardinality for numeric variables in surrogate models to be considered categorical." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_max_numeric_enum_cardinality", + "output": "mli max numeric enum cardinality config.toml: Max cardinality for numeric variables in surrogate models to be considered categorical: Max cardinality for numeric variables in surrogate models to be considered categorical." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli max numeric enum cardinality", + "output": "mli max numeric enum cardinality config.toml: Max cardinality for numeric variables in surrogate models to be considered categorical: Max cardinality for numeric variables in surrogate models to be considered categorical." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max cardinality for numeric variables in surrogate models to be considered categorical: ", + "output": "mli max numeric enum cardinality config.toml: Max cardinality for numeric variables in surrogate models to be considered categorical: Max cardinality for numeric variables in surrogate models to be considered categorical." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_max_numeric_enum_cardinality", + "output": "mli max numeric enum cardinality config.toml: Max cardinality for numeric variables in surrogate models to be considered categorical." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_max_numeric_enum_cardinality", + "output": "mli max numeric enum cardinality config.toml: Max cardinality for numeric variables in surrogate models to be considered categorical: Max cardinality for numeric variables in surrogate models to be considered categorical." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_max_number_cluster_vars do? : mli max number cluster vars config.toml: Maximum number of features allowed for k-LIME k-means clustering." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_max_number_cluster_vars. : mli max number cluster vars config.toml: Maximum number of features allowed for k-LIME k-means clustering." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum number of features allowed for k-LIME k-means clustering: . : Set the mli max number cluster vars config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_max_number_cluster_vars", + "output": "mli max number cluster vars config.toml: Maximum number of features allowed for k-LIME k-means clustering." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_max_number_cluster_vars", + "output": "mli max number cluster vars config.toml: Maximum number of features allowed for k-LIME k-means clustering: Maximum number of features allowed for k-LIME k-means clustering." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli max number cluster vars", + "output": "mli max number cluster vars config.toml: Maximum number of features allowed for k-LIME k-means clustering: Maximum number of features allowed for k-LIME k-means clustering." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of features allowed for k-LIME k-means clustering: ", + "output": "mli max number cluster vars config.toml: Maximum number of features allowed for k-LIME k-means clustering: Maximum number of features allowed for k-LIME k-means clustering." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_max_number_cluster_vars", + "output": "mli max number cluster vars config.toml: Maximum number of features allowed for k-LIME k-means clustering." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_max_number_cluster_vars", + "output": "mli max number cluster vars config.toml: Maximum number of features allowed for k-LIME k-means clustering: Maximum number of features allowed for k-LIME k-means clustering." + }, + { + "prompt_type": "plain", + "instruction": ": What does use_all_columns_klime_kmeans do? : use all columns klime kmeans config.toml: Use all columns for k-LIME k-means clustering (this will override `mli_max_number_cluster_vars` if set to `True`)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain use_all_columns_klime_kmeans. : use all columns klime kmeans config.toml: Use all columns for k-LIME k-means clustering (this will override `mli_max_number_cluster_vars` if set to `True`)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Use all columns for k-LIME k-means clustering (this will override `mli_max_number_cluster_vars` if set to `True`): . : Set the use all columns klime kmeans config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_all_columns_klime_kmeans", + "output": "use all columns klime kmeans config.toml: Use all columns for k-LIME k-means clustering (this will override `mli_max_number_cluster_vars` if set to `True`)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_all_columns_klime_kmeans", + "output": "use all columns klime kmeans config.toml: Use all columns for k-LIME k-means clustering (this will override `mli_max_number_cluster_vars` if set to `True`): Use all columns for k-LIME k-means clustering (this will override `mli_max_number_cluster_vars` if set to `True`)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use all columns klime kmeans", + "output": "use all columns klime kmeans config.toml: Use all columns for k-LIME k-means clustering (this will override `mli_max_number_cluster_vars` if set to `True`): Use all columns for k-LIME k-means clustering (this will override `mli_max_number_cluster_vars` if set to `True`)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Use all columns for k-LIME k-means clustering (this will override `mli_max_number_cluster_vars` if set to `True`): ", + "output": "use all columns klime kmeans config.toml: Use all columns for k-LIME k-means clustering (this will override `mli_max_number_cluster_vars` if set to `True`): Use all columns for k-LIME k-means clustering (this will override `mli_max_number_cluster_vars` if set to `True`)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting use_all_columns_klime_kmeans", + "output": "use all columns klime kmeans config.toml: Use all columns for k-LIME k-means clustering (this will override `mli_max_number_cluster_vars` if set to `True`)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting use_all_columns_klime_kmeans", + "output": "use all columns klime kmeans config.toml: Use all columns for k-LIME k-means clustering (this will override `mli_max_number_cluster_vars` if set to `True`): Use all columns for k-LIME k-means clustering (this will override `mli_max_number_cluster_vars` if set to `True`)." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_strict_version_check do? : mli strict version check config.toml: Strict version check for MLI" + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_strict_version_check. 
: mli strict version check config.toml: Strict version check for MLI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_strict_version_check", + "output": "mli strict version check config.toml: Strict version check for MLI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_strict_version_check", + "output": "mli strict version check config.toml: Strict version check for MLI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli strict version check", + "output": "mli strict version check config.toml: Strict version check for MLI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli strict version check config.toml: Strict version check for MLI" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_strict_version_check", + "output": "mli strict version check config.toml: Strict version check for MLI" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_strict_version_check", + "output": "mli strict version check config.toml: Strict version check for MLI" + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_cloud_name do? : mli cloud name config.toml: MLI cloud name" + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_cloud_name. 
: mli cloud name config.toml: MLI cloud name" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_cloud_name", + "output": "mli cloud name config.toml: MLI cloud name" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_cloud_name", + "output": "mli cloud name config.toml: MLI cloud name" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli cloud name", + "output": "mli cloud name config.toml: MLI cloud name" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli cloud name config.toml: MLI cloud name" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_cloud_name", + "output": "mli cloud name config.toml: MLI cloud name" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_cloud_name", + "output": "mli cloud name config.toml: MLI cloud name" + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_ice_per_bin_strategy do? : mli ice per bin strategy config.toml: Compute original model ICE using per feature's bin predictions (true) or use \"one frame\" strategy (false)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_ice_per_bin_strategy. : mli ice per bin strategy config.toml: Compute original model ICE using per feature's bin predictions (true) or use \"one frame\" strategy (false)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_ice_per_bin_strategy", + "output": "mli ice per bin strategy config.toml: Compute original model ICE using per feature's bin predictions (true) or use \"one frame\" strategy (false)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_ice_per_bin_strategy", + "output": "mli ice per bin strategy config.toml: Compute original model ICE using per feature's bin predictions (true) or use \"one frame\" strategy (false)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli ice per bin strategy", + "output": "mli ice per bin strategy config.toml: Compute original model ICE using per feature's bin predictions (true) or use \"one frame\" strategy (false)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli ice per bin strategy config.toml: Compute original model ICE using per feature's bin predictions (true) or use \"one frame\" strategy (false)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_ice_per_bin_strategy", + "output": "mli ice per bin strategy config.toml: Compute original model ICE using per feature's bin predictions (true) or use \"one frame\" strategy (false)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_ice_per_bin_strategy", + "output": "mli ice per bin strategy config.toml: Compute original model ICE using per feature's bin predictions (true) or use \"one frame\" strategy (false)." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_dia_default_max_cardinality do? : mli dia default max cardinality config.toml: By default DIA will run for categorical columns with cardinality <= mli_dia_default_max_cardinality." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_dia_default_max_cardinality. 
: mli dia default max cardinality config.toml: By default DIA will run for categorical columns with cardinality <= mli_dia_default_max_cardinality." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_dia_default_max_cardinality", + "output": "mli dia default max cardinality config.toml: By default DIA will run for categorical columns with cardinality <= mli_dia_default_max_cardinality." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_dia_default_max_cardinality", + "output": "mli dia default max cardinality config.toml: By default DIA will run for categorical columns with cardinality <= mli_dia_default_max_cardinality." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli dia default max cardinality", + "output": "mli dia default max cardinality config.toml: By default DIA will run for categorical columns with cardinality <= mli_dia_default_max_cardinality." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli dia default max cardinality config.toml: By default DIA will run for categorical columns with cardinality <= mli_dia_default_max_cardinality." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_dia_default_max_cardinality", + "output": "mli dia default max cardinality config.toml: By default DIA will run for categorical columns with cardinality <= mli_dia_default_max_cardinality." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_dia_default_max_cardinality", + "output": "mli dia default max cardinality config.toml: By default DIA will run for categorical columns with cardinality <= mli_dia_default_max_cardinality." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_dia_default_min_cardinality do? : mli dia default min cardinality config.toml: By default DIA will run for categorical columns with cardinality >= mli_dia_default_min_cardinality." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_dia_default_min_cardinality. : mli dia default min cardinality config.toml: By default DIA will run for categorical columns with cardinality >= mli_dia_default_min_cardinality." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_dia_default_min_cardinality", + "output": "mli dia default min cardinality config.toml: By default DIA will run for categorical columns with cardinality >= mli_dia_default_min_cardinality." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_dia_default_min_cardinality", + "output": "mli dia default min cardinality config.toml: By default DIA will run for categorical columns with cardinality >= mli_dia_default_min_cardinality." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli dia default min cardinality", + "output": "mli dia default min cardinality config.toml: By default DIA will run for categorical columns with cardinality >= mli_dia_default_min_cardinality." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli dia default min cardinality config.toml: By default DIA will run for categorical columns with cardinality >= mli_dia_default_min_cardinality." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_dia_default_min_cardinality", + "output": "mli dia default min cardinality config.toml: By default DIA will run for categorical columns with cardinality >= mli_dia_default_min_cardinality." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_dia_default_min_cardinality", + "output": "mli dia default min cardinality config.toml: By default DIA will run for categorical columns with cardinality >= mli_dia_default_min_cardinality." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_shapley_sample_size do? : mli shapley sample size config.toml: When number of rows are above this limit, then sample for MLI transformed Shapley calculation." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_shapley_sample_size. : mli shapley sample size config.toml: When number of rows are above this limit, then sample for MLI transformed Shapley calculation." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Sample size for transformed Shapley: . : Set the mli shapley sample size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_shapley_sample_size", + "output": "mli shapley sample size config.toml: When number of rows are above this limit, then sample for MLI transformed Shapley calculation." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_shapley_sample_size", + "output": "mli shapley sample size config.toml: Sample size for transformed Shapley: When number of rows are above this limit, then sample for MLI transformed Shapley calculation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli shapley sample size", + "output": "mli shapley sample size config.toml: Sample size for transformed Shapley: When number of rows are above this limit, then sample for MLI transformed Shapley calculation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Sample size for transformed Shapley: ", + "output": "mli shapley sample size config.toml: Sample size for transformed Shapley: When number of rows are above this limit, then sample for MLI transformed Shapley calculation." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_shapley_sample_size", + "output": "mli shapley sample size config.toml: When number of rows are above this limit, then sample for MLI transformed Shapley calculation." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_shapley_sample_size", + "output": "mli shapley sample size config.toml: Sample size for transformed Shapley: When number of rows are above this limit, then sample for MLI transformed Shapley calculation." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_mli_keeper do? : enable mli keeper config.toml: Enable MLI keeper which ensures efficient use of filesystem/memory/DB by MLI." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_mli_keeper. 
: enable mli keeper config.toml: Enable MLI keeper which ensures efficient use of filesystem/memory/DB by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_mli_keeper", + "output": "enable mli keeper config.toml: Enable MLI keeper which ensures efficient use of filesystem/memory/DB by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_mli_keeper", + "output": "enable mli keeper config.toml: Enable MLI keeper which ensures efficient use of filesystem/memory/DB by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable mli keeper", + "output": "enable mli keeper config.toml: Enable MLI keeper which ensures efficient use of filesystem/memory/DB by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable mli keeper config.toml: Enable MLI keeper which ensures efficient use of filesystem/memory/DB by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_mli_keeper", + "output": "enable mli keeper config.toml: Enable MLI keeper which ensures efficient use of filesystem/memory/DB by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_mli_keeper", + "output": "enable mli keeper config.toml: Enable MLI keeper which ensures efficient use of filesystem/memory/DB by MLI." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_mli_sa do? : enable mli sa config.toml: Enable MLI Sensitivity Analysis" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_mli_sa. 
: enable mli sa config.toml: Enable MLI Sensitivity Analysis" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_mli_sa", + "output": "enable mli sa config.toml: Enable MLI Sensitivity Analysis" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_mli_sa", + "output": "enable mli sa config.toml: Enable MLI Sensitivity Analysis" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable mli sa", + "output": "enable mli sa config.toml: Enable MLI Sensitivity Analysis" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable mli sa config.toml: Enable MLI Sensitivity Analysis" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_mli_sa", + "output": "enable mli sa config.toml: Enable MLI Sensitivity Analysis" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_mli_sa", + "output": "enable mli sa config.toml: Enable MLI Sensitivity Analysis" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_mli_priority_queues do? : enable mli priority queues config.toml: Enable priority queues based explainers execution. Priority queues restrict available system resources and prevent system over-utilization. Interpretation execution time might be (significantly) slower." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_mli_priority_queues. : enable mli priority queues config.toml: Enable priority queues based explainers execution. Priority queues restrict available system resources and prevent system over-utilization. Interpretation execution time might be (significantly) slower." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_mli_priority_queues", + "output": "enable mli priority queues config.toml: Enable priority queues based explainers execution. Priority queues restrict available system resources and prevent system over-utilization. Interpretation execution time might be (significantly) slower." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_mli_priority_queues", + "output": "enable mli priority queues config.toml: Enable priority queues based explainers execution. Priority queues restrict available system resources and prevent system over-utilization. Interpretation execution time might be (significantly) slower." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable mli priority queues", + "output": "enable mli priority queues config.toml: Enable priority queues based explainers execution. Priority queues restrict available system resources and prevent system over-utilization. Interpretation execution time might be (significantly) slower." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable mli priority queues config.toml: Enable priority queues based explainers execution. Priority queues restrict available system resources and prevent system over-utilization. Interpretation execution time might be (significantly) slower." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_mli_priority_queues", + "output": "enable mli priority queues config.toml: Enable priority queues based explainers execution. Priority queues restrict available system resources and prevent system over-utilization. 
Interpretation execution time might be (significantly) slower." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_mli_priority_queues", + "output": "enable mli priority queues config.toml: Enable priority queues based explainers execution. Priority queues restrict available system resources and prevent system over-utilization. Interpretation execution time might be (significantly) slower." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_sequential_task_execution do? : mli sequential task execution config.toml: Explainers are run sequentially by default. This option can be used to run all explainers in parallel which can - depending on hardware strength and the number of explainers - decrease interpretation duration. Consider explainer dependencies, random explainers order and hardware over utilization." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_sequential_task_execution. : mli sequential task execution config.toml: Explainers are run sequentially by default. This option can be used to run all explainers in parallel which can - depending on hardware strength and the number of explainers - decrease interpretation duration. Consider explainer dependencies, random explainers order and hardware over utilization." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sequential_task_execution", + "output": "mli sequential task execution config.toml: Explainers are run sequentially by default. This option can be used to run all explainers in parallel which can - depending on hardware strength and the number of explainers - decrease interpretation duration. Consider explainer dependencies, random explainers order and hardware over utilization." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sequential_task_execution", + "output": "mli sequential task execution config.toml: Explainers are run sequentially by default. This option can be used to run all explainers in parallel which can - depending on hardware strength and the number of explainers - decrease interpretation duration. Consider explainer dependencies, random explainers order and hardware over utilization." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli sequential task execution", + "output": "mli sequential task execution config.toml: Explainers are run sequentially by default. This option can be used to run all explainers in parallel which can - depending on hardware strength and the number of explainers - decrease interpretation duration. Consider explainer dependencies, random explainers order and hardware over utilization." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli sequential task execution config.toml: Explainers are run sequentially by default. This option can be used to run all explainers in parallel which can - depending on hardware strength and the number of explainers - decrease interpretation duration. Consider explainer dependencies, random explainers order and hardware over utilization." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_sequential_task_execution", + "output": "mli sequential task execution config.toml: Explainers are run sequentially by default. This option can be used to run all explainers in parallel which can - depending on hardware strength and the number of explainers - decrease interpretation duration. 
Consider explainer dependencies, random explainers order and hardware over utilization." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_sequential_task_execution", + "output": "mli sequential task execution config.toml: Explainers are run sequentially by default. This option can be used to run all explainers in parallel which can - depending on hardware strength and the number of explainers - decrease interpretation duration. Consider explainer dependencies, random explainers order and hardware over utilization." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_dia_sample_size do? : mli dia sample size config.toml: When number of rows are above this limit, then sample for Disparate Impact Analysis." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_dia_sample_size. : mli dia sample size config.toml: When number of rows are above this limit, then sample for Disparate Impact Analysis." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Sample size for Disparate Impact Analysis: . : Set the mli dia sample size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_dia_sample_size", + "output": "mli dia sample size config.toml: When number of rows are above this limit, then sample for Disparate Impact Analysis." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_dia_sample_size", + "output": "mli dia sample size config.toml: Sample size for Disparate Impact Analysis: When number of rows are above this limit, then sample for Disparate Impact Analysis." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli dia sample size", + "output": "mli dia sample size config.toml: Sample size for Disparate Impact Analysis: When number of rows are above this limit, then sample for Disparate Impact Analysis." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Sample size for Disparate Impact Analysis: ", + "output": "mli dia sample size config.toml: Sample size for Disparate Impact Analysis: When number of rows are above this limit, then sample for Disparate Impact Analysis." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_dia_sample_size", + "output": "mli dia sample size config.toml: When number of rows are above this limit, then sample for Disparate Impact Analysis." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_dia_sample_size", + "output": "mli dia sample size config.toml: Sample size for Disparate Impact Analysis: When number of rows are above this limit, then sample for Disparate Impact Analysis." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_pd_sample_size do? : mli pd sample size config.toml: When number of rows are above this limit, then sample for Partial Dependence Plot." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_pd_sample_size. : mli pd sample size config.toml: When number of rows are above this limit, then sample for Partial Dependence Plot." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Sample size for Partial Dependence Plot: . 
: Set the mli pd sample size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_pd_sample_size", + "output": "mli pd sample size config.toml: When number of rows are above this limit, then sample for Partial Dependence Plot." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_pd_sample_size", + "output": "mli pd sample size config.toml: Sample size for Partial Dependence Plot: When number of rows are above this limit, then sample for Partial Dependence Plot." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli pd sample size", + "output": "mli pd sample size config.toml: Sample size for Partial Dependence Plot: When number of rows are above this limit, then sample for Partial Dependence Plot." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Sample size for Partial Dependence Plot: ", + "output": "mli pd sample size config.toml: Sample size for Partial Dependence Plot: When number of rows are above this limit, then sample for Partial Dependence Plot." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_pd_sample_size", + "output": "mli pd sample size config.toml: When number of rows are above this limit, then sample for Partial Dependence Plot." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_pd_sample_size", + "output": "mli pd sample size config.toml: Sample size for Partial Dependence Plot: When number of rows are above this limit, then sample for Partial Dependence Plot." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_pd_numcat_num_chart do? 
: mli pd numcat num chart config.toml: Use dynamic switching between Partial Dependence Plot numeric and categorical binning and UI chart selection in case of features which were used both as numeric and categorical by experiment." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_pd_numcat_num_chart. : mli pd numcat num chart config.toml: Use dynamic switching between Partial Dependence Plot numeric and categorical binning and UI chart selection in case of features which were used both as numeric and categorical by experiment." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Unique feature values count driven Partial Dependence Plot binning and chart selection: . : Set the mli pd numcat num chart config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_pd_numcat_num_chart", + "output": "mli pd numcat num chart config.toml: Use dynamic switching between Partial Dependence Plot numeric and categorical binning and UI chart selection in case of features which were used both as numeric and categorical by experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_pd_numcat_num_chart", + "output": "mli pd numcat num chart config.toml: Unique feature values count driven Partial Dependence Plot binning and chart selection: Use dynamic switching between Partial Dependence Plot numeric and categorical binning and UI chart selection in case of features which were used both as numeric and categorical by experiment." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli pd numcat num chart", + "output": "mli pd numcat num chart config.toml: Unique feature values count driven Partial Dependence Plot binning and chart selection: Use dynamic switching between Partial Dependence Plot numeric and categorical binning and UI chart selection in case of features which were used both as numeric and categorical by experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Unique feature values count driven Partial Dependence Plot binning and chart selection: ", + "output": "mli pd numcat num chart config.toml: Unique feature values count driven Partial Dependence Plot binning and chart selection: Use dynamic switching between Partial Dependence Plot numeric and categorical binning and UI chart selection in case of features which were used both as numeric and categorical by experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_pd_numcat_num_chart", + "output": "mli pd numcat num chart config.toml: Use dynamic switching between Partial Dependence Plot numeric and categorical binning and UI chart selection in case of features which were used both as numeric and categorical by experiment." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_pd_numcat_num_chart", + "output": "mli pd numcat num chart config.toml: Unique feature values count driven Partial Dependence Plot binning and chart selection: Use dynamic switching between Partial Dependence Plot numeric and categorical binning and UI chart selection in case of features which were used both as numeric and categorical by experiment." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_pd_numcat_threshold do? 
: mli pd numcat threshold config.toml: If 'mli_pd_numcat_num_chart' is enabled, then use numeric binning and chart if feature unique values count is bigger than threshold, else use categorical binning and chart." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_pd_numcat_threshold. : mli pd numcat threshold config.toml: If 'mli_pd_numcat_num_chart' is enabled, then use numeric binning and chart if feature unique values count is bigger than threshold, else use categorical binning and chart." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Threshold for Partial Dependence Plot binning and chart selection (<=threshold categorical, >threshold numeric): . : Set the mli pd numcat threshold config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_pd_numcat_threshold", + "output": "mli pd numcat threshold config.toml: If 'mli_pd_numcat_num_chart' is enabled, then use numeric binning and chart if feature unique values count is bigger than threshold, else use categorical binning and chart." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_pd_numcat_threshold", + "output": "mli pd numcat threshold config.toml: Threshold for Partial Dependence Plot binning and chart selection (<=threshold categorical, >threshold numeric): If 'mli_pd_numcat_num_chart' is enabled, then use numeric binning and chart if feature unique values count is bigger than threshold, else use categorical binning and chart." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli pd numcat threshold", + "output": "mli pd numcat threshold config.toml: Threshold for Partial Dependence Plot binning and chart selection (<=threshold categorical, >threshold numeric): If 'mli_pd_numcat_num_chart' is enabled, then use numeric binning and chart if feature unique values count is bigger than threshold, else use categorical binning and chart." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Threshold for Partial Dependence Plot binning and chart selection (<=threshold categorical, >threshold numeric): ", + "output": "mli pd numcat threshold config.toml: Threshold for Partial Dependence Plot binning and chart selection (<=threshold categorical, >threshold numeric): If 'mli_pd_numcat_num_chart' is enabled, then use numeric binning and chart if feature unique values count is bigger than threshold, else use categorical binning and chart." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_pd_numcat_threshold", + "output": "mli pd numcat threshold config.toml: If 'mli_pd_numcat_num_chart' is enabled, then use numeric binning and chart if feature unique values count is bigger than threshold, else use categorical binning and chart." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_pd_numcat_threshold", + "output": "mli pd numcat threshold config.toml: Threshold for Partial Dependence Plot binning and chart selection (<=threshold categorical, >threshold numeric): If 'mli_pd_numcat_num_chart' is enabled, then use numeric binning and chart if feature unique values count is bigger than threshold, else use categorical binning and chart." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does new_mli_list_only_explainable_datasets do? : new mli list only explainable datasets config.toml: In New Interpretation screen show only datasets which can be used to explain a selected model. This can slow down the server significantly." + }, + { + "prompt_type": "plain", + "instruction": ": Explain new_mli_list_only_explainable_datasets. : new mli list only explainable datasets config.toml: In New Interpretation screen show only datasets which can be used to explain a selected model. This can slow down the server significantly." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "new_mli_list_only_explainable_datasets", + "output": "new mli list only explainable datasets config.toml: In New Interpretation screen show only datasets which can be used to explain a selected model. This can slow down the server significantly." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "new_mli_list_only_explainable_datasets", + "output": "new mli list only explainable datasets config.toml: In New Interpretation screen show only datasets which can be used to explain a selected model. This can slow down the server significantly." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "new mli list only explainable datasets", + "output": "new mli list only explainable datasets config.toml: In New Interpretation screen show only datasets which can be used to explain a selected model. This can slow down the server significantly." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "new mli list only explainable datasets config.toml: In New Interpretation screen show only datasets which can be used to explain a selected model. This can slow down the server significantly." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting new_mli_list_only_explainable_datasets", + "output": "new mli list only explainable datasets config.toml: In New Interpretation screen show only datasets which can be used to explain a selected model. This can slow down the server significantly." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting new_mli_list_only_explainable_datasets", + "output": "new mli list only explainable datasets config.toml: In New Interpretation screen show only datasets which can be used to explain a selected model. This can slow down the server significantly." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_mli_async_api do? : enable mli async api config.toml: Enable async/await-based non-blocking MLI API" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_mli_async_api. 
: enable mli async api config.toml: Enable async/await-based non-blocking MLI API" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_mli_async_api", + "output": "enable mli async api config.toml: Enable async/await-based non-blocking MLI API" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_mli_async_api", + "output": "enable mli async api config.toml: Enable async/await-based non-blocking MLI API" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable mli async api", + "output": "enable mli async api config.toml: Enable async/await-based non-blocking MLI API" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable mli async api config.toml: Enable async/await-based non-blocking MLI API" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_mli_async_api", + "output": "enable mli async api config.toml: Enable async/await-based non-blocking MLI API" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_mli_async_api", + "output": "enable mli async api config.toml: Enable async/await-based non-blocking MLI API" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_mli_sa_main_chart_aggregator do? : enable mli sa main chart aggregator config.toml: Enable main chart aggregator in Sensitivity Analysis" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_mli_sa_main_chart_aggregator. 
: enable mli sa main chart aggregator config.toml: Enable main chart aggregator in Sensitivity Analysis" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_mli_sa_main_chart_aggregator", + "output": "enable mli sa main chart aggregator config.toml: Enable main chart aggregator in Sensitivity Analysis" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_mli_sa_main_chart_aggregator", + "output": "enable mli sa main chart aggregator config.toml: Enable main chart aggregator in Sensitivity Analysis" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable mli sa main chart aggregator", + "output": "enable mli sa main chart aggregator config.toml: Enable main chart aggregator in Sensitivity Analysis" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable mli sa main chart aggregator config.toml: Enable main chart aggregator in Sensitivity Analysis" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_mli_sa_main_chart_aggregator", + "output": "enable mli sa main chart aggregator config.toml: Enable main chart aggregator in Sensitivity Analysis" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_mli_sa_main_chart_aggregator", + "output": "enable mli sa main chart aggregator config.toml: Enable main chart aggregator in Sensitivity Analysis" + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_sa_sampling_limit do? : mli sa sampling limit config.toml: When to sample for Sensitivity Analysis (number of rows after sampling)." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_sa_sampling_limit. : mli sa sampling limit config.toml: When to sample for Sensitivity Analysis (number of rows after sampling)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Sample size for SA: . : Set the mli sa sampling limit config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sa_sampling_limit", + "output": "mli sa sampling limit config.toml: When to sample for Sensitivity Analysis (number of rows after sampling)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sa_sampling_limit", + "output": "mli sa sampling limit config.toml: Sample size for SA: When to sample for Sensitivity Analysis (number of rows after sampling)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli sa sampling limit", + "output": "mli sa sampling limit config.toml: Sample size for SA: When to sample for Sensitivity Analysis (number of rows after sampling)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Sample size for SA: ", + "output": "mli sa sampling limit config.toml: Sample size for SA: When to sample for Sensitivity Analysis (number of rows after sampling)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_sa_sampling_limit", + "output": "mli sa sampling limit config.toml: When to sample for Sensitivity Analysis (number of rows after sampling)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_sa_sampling_limit", + "output": "mli sa sampling limit config.toml: Sample size for SA: When to sample for Sensitivity Analysis (number of rows after sampling)." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_sa_main_chart_aggregator_limit do? : mli sa main chart aggregator limit config.toml: Run main chart aggregator in Sensitivity Analysis when the number of dataset instances is bigger than given limit." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_sa_main_chart_aggregator_limit. : mli sa main chart aggregator limit config.toml: Run main chart aggregator in Sensitivity Analysis when the number of dataset instances is bigger than given limit." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sa_main_chart_aggregator_limit", + "output": "mli sa main chart aggregator limit config.toml: Run main chart aggregator in Sensitivity Analysis when the number of dataset instances is bigger than given limit." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_sa_main_chart_aggregator_limit", + "output": "mli sa main chart aggregator limit config.toml: Run main chart aggregator in Sensitivity Analysis when the number of dataset instances is bigger than given limit." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli sa main chart aggregator limit", + "output": "mli sa main chart aggregator limit config.toml: Run main chart aggregator in Sensitivity Analysis when the number of dataset instances is bigger than given limit." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli sa main chart aggregator limit config.toml: Run main chart aggregator in Sensitivity Analysis when the number of dataset instances is bigger than given limit." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_sa_main_chart_aggregator_limit", + "output": "mli sa main chart aggregator limit config.toml: Run main chart aggregator in Sensitivity Analysis when the number of dataset instances is bigger than given limit." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_sa_main_chart_aggregator_limit", + "output": "mli sa main chart aggregator limit config.toml: Run main chart aggregator in Sensitivity Analysis when the number of dataset instances is bigger than given limit." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_predict_safe do? : mli predict safe config.toml: Use predict_safe() (true) or predict_base() (false) in MLI (PD, ICE, SA, ...)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_predict_safe. : mli predict safe config.toml: Use predict_safe() (true) or predict_base() (false) in MLI (PD, ICE, SA, ...)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_predict_safe", + "output": "mli predict safe config.toml: Use predict_safe() (true) or predict_base() (false) in MLI (PD, ICE, SA, ...)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_predict_safe", + "output": "mli predict safe config.toml: Use predict_safe() (true) or predict_base() (false) in MLI (PD, ICE, SA, ...)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli predict safe", + "output": "mli predict safe config.toml: Use predict_safe() (true) or predict_base() (false) in MLI (PD, ICE, SA, ...)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli predict safe config.toml: Use predict_safe() (true) or predict_base() (false) in MLI (PD, ICE, SA, ...)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_predict_safe", + "output": "mli predict safe config.toml: Use predict_safe() (true) or predict_base() (false) in MLI (PD, ICE, SA, ...)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_predict_safe", + "output": "mli predict safe config.toml: Use predict_safe() (true) or predict_base() (false) in MLI (PD, ICE, SA, ...)." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_max_surrogate_retries do? : mli max surrogate retries config.toml: Number of max retries should the surrogate model fail to build." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_max_surrogate_retries. : mli max surrogate retries config.toml: Number of max retries should the surrogate model fail to build." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_max_surrogate_retries", + "output": "mli max surrogate retries config.toml: Number of max retries should the surrogate model fail to build." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_max_surrogate_retries", + "output": "mli max surrogate retries config.toml: Number of max retries should the surrogate model fail to build." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli max surrogate retries", + "output": "mli max surrogate retries config.toml: Number of max retries should the surrogate model fail to build." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli max surrogate retries config.toml: Number of max retries should the surrogate model fail to build." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_max_surrogate_retries", + "output": "mli max surrogate retries config.toml: Number of max retries should the surrogate model fail to build." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_max_surrogate_retries", + "output": "mli max surrogate retries config.toml: Number of max retries should the surrogate model fail to build." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_mli_symlinks do? : enable mli symlinks config.toml: Allow use of symlinks (instead of file copy) by MLI explainer procedures." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_mli_symlinks. : enable mli symlinks config.toml: Allow use of symlinks (instead of file copy) by MLI explainer procedures." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_mli_symlinks", + "output": "enable mli symlinks config.toml: Allow use of symlinks (instead of file copy) by MLI explainer procedures." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_mli_symlinks", + "output": "enable mli symlinks config.toml: Allow use of symlinks (instead of file copy) by MLI explainer procedures." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable mli symlinks", + "output": "enable mli symlinks config.toml: Allow use of symlinks (instead of file copy) by MLI explainer procedures." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable mli symlinks config.toml: Allow use of symlinks (instead of file copy) by MLI explainer procedures." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_mli_symlinks", + "output": "enable mli symlinks config.toml: Allow use of symlinks (instead of file copy) by MLI explainer procedures." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_mli_symlinks", + "output": "enable mli symlinks config.toml: Allow use of symlinks (instead of file copy) by MLI explainer procedures." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_mli_fraction_memory do? : h2o mli fraction memory config.toml: Fraction of memory to allocate for h2o MLI jar" + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_mli_fraction_memory. 
: h2o mli fraction memory config.toml: Fraction of memory to allocate for h2o MLI jar" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_mli_fraction_memory", + "output": "h2o mli fraction memory config.toml: Fraction of memory to allocate for h2o MLI jar" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_mli_fraction_memory", + "output": "h2o mli fraction memory config.toml: Fraction of memory to allocate for h2o MLI jar" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o mli fraction memory", + "output": "h2o mli fraction memory config.toml: Fraction of memory to allocate for h2o MLI jar" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o mli fraction memory config.toml: Fraction of memory to allocate for h2o MLI jar" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_mli_fraction_memory", + "output": "h2o mli fraction memory config.toml: Fraction of memory to allocate for h2o MLI jar" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_mli_fraction_memory", + "output": "h2o mli fraction memory config.toml: Fraction of memory to allocate for h2o MLI jar" + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_custom do? : mli custom config.toml: Add TOML string to Driverless AI server config.toml configuration file." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_custom. : mli custom config.toml: Add TOML string to Driverless AI server config.toml configuration file." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Add to config.toml via TOML string: . 
: Set the mli custom config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_custom", + "output": "mli custom config.toml: Add TOML string to Driverless AI server config.toml configuration file." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_custom", + "output": "mli custom config.toml: Add to config.toml via TOML string: Add TOML string to Driverless AI server config.toml configuration file." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli custom", + "output": "mli custom config.toml: Add to config.toml via TOML string: Add TOML string to Driverless AI server config.toml configuration file." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Add to config.toml via TOML string: ", + "output": "mli custom config.toml: Add to config.toml via TOML string: Add TOML string to Driverless AI server config.toml configuration file." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_custom", + "output": "mli custom config.toml: Add TOML string to Driverless AI server config.toml configuration file." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_custom", + "output": "mli custom config.toml: Add to config.toml via TOML string: Add TOML string to Driverless AI server config.toml configuration file." + }, + { + "prompt_type": "plain", + "instruction": ": What does excluded_mli_explainers do? : excluded mli explainers config.toml: To exclude e.g. Sensitivity Analysis explainer use: excluded_mli_explainers=['h2oaicore.mli.byor.recipes.sa_explainer.SaExplainer']." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain excluded_mli_explainers. : excluded mli explainers config.toml: To exclude e.g. Sensitivity Analysis explainer use: excluded_mli_explainers=['h2oaicore.mli.byor.recipes.sa_explainer.SaExplainer']." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Exclude specific explainers by explainer ID: . : Set the excluded mli explainers config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_mli_explainers", + "output": "excluded mli explainers config.toml: To exclude e.g. Sensitivity Analysis explainer use: excluded_mli_explainers=['h2oaicore.mli.byor.recipes.sa_explainer.SaExplainer']." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded_mli_explainers", + "output": "excluded mli explainers config.toml: Exclude specific explainers by explainer ID: To exclude e.g. Sensitivity Analysis explainer use: excluded_mli_explainers=['h2oaicore.mli.byor.recipes.sa_explainer.SaExplainer']." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "excluded mli explainers", + "output": "excluded mli explainers config.toml: Exclude specific explainers by explainer ID: To exclude e.g. Sensitivity Analysis explainer use: excluded_mli_explainers=['h2oaicore.mli.byor.recipes.sa_explainer.SaExplainer']." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Exclude specific explainers by explainer ID: ", + "output": "excluded mli explainers config.toml: Exclude specific explainers by explainer ID: To exclude e.g. Sensitivity Analysis explainer use: excluded_mli_explainers=['h2oaicore.mli.byor.recipes.sa_explainer.SaExplainer']." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting excluded_mli_explainers", + "output": "excluded mli explainers config.toml: To exclude e.g. Sensitivity Analysis explainer use: excluded_mli_explainers=['h2oaicore.mli.byor.recipes.sa_explainer.SaExplainer']." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting excluded_mli_explainers", + "output": "excluded mli explainers config.toml: Exclude specific explainers by explainer ID: To exclude e.g. Sensitivity Analysis explainer use: excluded_mli_explainers=['h2oaicore.mli.byor.recipes.sa_explainer.SaExplainer']." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_ws_perfmon do? : enable ws perfmon config.toml: Enable RPC API performance monitor." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_ws_perfmon. : enable ws perfmon config.toml: Enable RPC API performance monitor." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_ws_perfmon", + "output": "enable ws perfmon config.toml: Enable RPC API performance monitor." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_ws_perfmon", + "output": "enable ws perfmon config.toml: Enable RPC API performance monitor." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable ws perfmon", + "output": "enable ws perfmon config.toml: Enable RPC API performance monitor." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable ws perfmon config.toml: Enable RPC API performance monitor." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_ws_perfmon", + "output": "enable ws perfmon config.toml: Enable RPC API performance monitor." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_ws_perfmon", + "output": "enable ws perfmon config.toml: Enable RPC API performance monitor." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_kernel_explainer_workers do? : mli kernel explainer workers config.toml: Number of parallel workers when scoring using MOJO in Kernel Explainer." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_kernel_explainer_workers. : mli kernel explainer workers config.toml: Number of parallel workers when scoring using MOJO in Kernel Explainer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_kernel_explainer_workers", + "output": "mli kernel explainer workers config.toml: Number of parallel workers when scoring using MOJO in Kernel Explainer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_kernel_explainer_workers", + "output": "mli kernel explainer workers config.toml: Number of parallel workers when scoring using MOJO in Kernel Explainer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli kernel explainer workers", + "output": "mli kernel explainer workers config.toml: Number of parallel workers when scoring using MOJO in Kernel Explainer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli kernel explainer workers config.toml: Number of parallel workers when scoring using MOJO in Kernel Explainer." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_kernel_explainer_workers", + "output": "mli kernel explainer workers config.toml: Number of parallel workers when scoring using MOJO in Kernel Explainer." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_kernel_explainer_workers", + "output": "mli kernel explainer workers config.toml: Number of parallel workers when scoring using MOJO in Kernel Explainer." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_run_kernel_explainer do? : mli run kernel explainer config.toml: Use Kernel Explainer to obtain Shapley values for original features." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_run_kernel_explainer. : mli run kernel explainer config.toml: Use Kernel Explainer to obtain Shapley values for original features." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Use Kernel Explainer to obtain Shapley values for original features: . : Set the mli run kernel explainer config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_run_kernel_explainer", + "output": "mli run kernel explainer config.toml: Use Kernel Explainer to obtain Shapley values for original features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_run_kernel_explainer", + "output": "mli run kernel explainer config.toml: Use Kernel Explainer to obtain Shapley values for original features: Use Kernel Explainer to obtain Shapley values for original features." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli run kernel explainer", + "output": "mli run kernel explainer config.toml: Use Kernel Explainer to obtain Shapley values for original features: Use Kernel Explainer to obtain Shapley values for original features." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Use Kernel Explainer to obtain Shapley values for original features: ", + "output": "mli run kernel explainer config.toml: Use Kernel Explainer to obtain Shapley values for original features: Use Kernel Explainer to obtain Shapley values for original features." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_run_kernel_explainer", + "output": "mli run kernel explainer config.toml: Use Kernel Explainer to obtain Shapley values for original features." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_run_kernel_explainer", + "output": "mli run kernel explainer config.toml: Use Kernel Explainer to obtain Shapley values for original features: Use Kernel Explainer to obtain Shapley values for original features." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_kernel_explainer_sample do? : mli kernel explainer sample config.toml: Sample input dataset for Kernel Explainer." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_kernel_explainer_sample. : mli kernel explainer sample config.toml: Sample input dataset for Kernel Explainer." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Sample input dataset for Kernel Explainer: . 
: Set the mli kernel explainer sample config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_kernel_explainer_sample", + "output": "mli kernel explainer sample config.toml: Sample input dataset for Kernel Explainer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_kernel_explainer_sample", + "output": "mli kernel explainer sample config.toml: Sample input dataset for Kernel Explainer: Sample input dataset for Kernel Explainer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli kernel explainer sample", + "output": "mli kernel explainer sample config.toml: Sample input dataset for Kernel Explainer: Sample input dataset for Kernel Explainer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Sample input dataset for Kernel Explainer: ", + "output": "mli kernel explainer sample config.toml: Sample input dataset for Kernel Explainer: Sample input dataset for Kernel Explainer." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_kernel_explainer_sample", + "output": "mli kernel explainer sample config.toml: Sample input dataset for Kernel Explainer." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_kernel_explainer_sample", + "output": "mli kernel explainer sample config.toml: Sample input dataset for Kernel Explainer: Sample input dataset for Kernel Explainer." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_kernel_explainer_sample_size do? : mli kernel explainer sample size config.toml: Sample size for input dataset passed to Kernel Explainer." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_kernel_explainer_sample_size. : mli kernel explainer sample size config.toml: Sample size for input dataset passed to Kernel Explainer." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Sample size for input dataset passed to Kernel Explainer: . : Set the mli kernel explainer sample size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_kernel_explainer_sample_size", + "output": "mli kernel explainer sample size config.toml: Sample size for input dataset passed to Kernel Explainer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_kernel_explainer_sample_size", + "output": "mli kernel explainer sample size config.toml: Sample size for input dataset passed to Kernel Explainer: Sample size for input dataset passed to Kernel Explainer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli kernel explainer sample size", + "output": "mli kernel explainer sample size config.toml: Sample size for input dataset passed to Kernel Explainer: Sample size for input dataset passed to Kernel Explainer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Sample size for input dataset passed to Kernel Explainer: ", + "output": "mli kernel explainer sample size config.toml: Sample size for input dataset passed to Kernel Explainer: Sample size for input dataset passed to Kernel Explainer." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_kernel_explainer_sample_size", + "output": "mli kernel explainer sample size config.toml: Sample size for input dataset passed to Kernel Explainer." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_kernel_explainer_sample_size", + "output": "mli kernel explainer sample size config.toml: Sample size for input dataset passed to Kernel Explainer: Sample size for input dataset passed to Kernel Explainer." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_kernel_explainer_nsamples do? : mli kernel explainer nsamples config.toml: 'auto' or int. Number of times to re-evaluate the model when explaining each prediction. More samples lead to lower variance estimates of the SHAP values. The 'auto' setting uses nsamples = 2 * X.shape[1] + 2048. This setting is disabled by default and DAI determines the right number internally." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_kernel_explainer_nsamples. : mli kernel explainer nsamples config.toml: 'auto' or int. Number of times to re-evaluate the model when explaining each prediction. More samples lead to lower variance estimates of the SHAP values. The 'auto' setting uses nsamples = 2 * X.shape[1] + 2048. This setting is disabled by default and DAI determines the right number internally." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of times to re-evaluate the model when explaining each prediction with Kernel Explainer. Default is determined internally: . : Set the mli kernel explainer nsamples config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_kernel_explainer_nsamples", + "output": "mli kernel explainer nsamples config.toml: 'auto' or int. Number of times to re-evaluate the model when explaining each prediction. More samples lead to lower variance estimates of the SHAP values. The 'auto' setting uses nsamples = 2 * X.shape[1] + 2048. This setting is disabled by default and DAI determines the right number internally." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_kernel_explainer_nsamples", + "output": "mli kernel explainer nsamples config.toml: Number of times to re-evaluate the model when explaining each prediction with Kernel Explainer. Default is determined internally: 'auto' or int. Number of times to re-evaluate the model when explaining each prediction. More samples lead to lower variance estimates of the SHAP values. The 'auto' setting uses nsamples = 2 * X.shape[1] + 2048. This setting is disabled by default and DAI determines the right number internally." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli kernel explainer nsamples", + "output": "mli kernel explainer nsamples config.toml: Number of times to re-evaluate the model when explaining each prediction with Kernel Explainer. Default is determined internally: 'auto' or int. Number of times to re-evaluate the model when explaining each prediction. More samples lead to lower variance estimates of the SHAP values. The 'auto' setting uses nsamples = 2 * X.shape[1] + 2048. This setting is disabled by default and DAI determines the right number internally." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of times to re-evaluate the model when explaining each prediction with Kernel Explainer. Default is determined internally: ", + "output": "mli kernel explainer nsamples config.toml: Number of times to re-evaluate the model when explaining each prediction with Kernel Explainer. Default is determined internally: 'auto' or int. Number of times to re-evaluate the model when explaining each prediction. More samples lead to lower variance estimates of the SHAP values. The 'auto' setting uses nsamples = 2 * X.shape[1] + 2048. 
This setting is disabled by default and DAI determines the right number internally." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_kernel_explainer_nsamples", + "output": "mli kernel explainer nsamples config.toml: 'auto' or int. Number of times to re-evaluate the model when explaining each prediction. More samples lead to lower variance estimates of the SHAP values. The 'auto' setting uses nsamples = 2 * X.shape[1] + 2048. This setting is disabled by default and DAI determines the right number internally." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_kernel_explainer_nsamples", + "output": "mli kernel explainer nsamples config.toml: Number of times to re-evaluate the model when explaining each prediction with Kernel Explainer. Default is determined internally: 'auto' or int. Number of times to re-evaluate the model when explaining each prediction. More samples lead to lower variance estimates of the SHAP values. The 'auto' setting uses nsamples = 2 * X.shape[1] + 2048. This setting is disabled by default and DAI determines the right number internally." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_kernel_explainer_l1_reg do? : mli kernel explainer l1 reg config.toml: 'num_features(int)', 'auto' (default for now, but deprecated), 'aic', 'bic', or float. The l1 regularization to use for feature selection (the estimation procedure is based on a debiased lasso). The 'auto' option currently uses aic when less than 20% of the possible sample space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF 'auto' WILL CHANGE in a future version to be based on 'num_features' instead of AIC. The aic and bic options use the AIC and BIC rules for regularization. Using 'num_features(int)' selects a fixed number of top features. 
Passing a float directly sets the alpha parameter of the sklearn.linear_model.Lasso model used for feature selection." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_kernel_explainer_l1_reg. : mli kernel explainer l1 reg config.toml: 'num_features(int)', 'auto' (default for now, but deprecated), 'aic', 'bic', or float. The l1 regularization to use for feature selection (the estimation procedure is based on a debiased lasso). The 'auto' option currently uses aic when less than 20% of the possible sample space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF 'auto' WILL CHANGE in a future version to be based on 'num_features' instead of AIC. The aic and bic options use the AIC and BIC rules for regularization. Using 'num_features(int)' selects a fixed number of top features. Passing a float directly sets the alpha parameter of the sklearn.linear_model.Lasso model used for feature selection." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: L1 regularization for Kernel Explainer: . : Set the mli kernel explainer l1 reg config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_kernel_explainer_l1_reg", + "output": "mli kernel explainer l1 reg config.toml: 'num_features(int)', 'auto' (default for now, but deprecated), 'aic', 'bic', or float. The l1 regularization to use for feature selection (the estimation procedure is based on a debiased lasso). The 'auto' option currently uses aic when less than 20% of the possible sample space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF 'auto' WILL CHANGE in a future version to be based on 'num_features' instead of AIC. The aic and bic options use the AIC and BIC rules for regularization. Using 'num_features(int)' selects a fixed number of top features. 
Passing a float directly sets the alpha parameter of the sklearn.linear_model.Lasso model used for feature selection." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_kernel_explainer_l1_reg", + "output": "mli kernel explainer l1 reg config.toml: L1 regularization for Kernel Explainer: 'num_features(int)', 'auto' (default for now, but deprecated), 'aic', 'bic', or float. The l1 regularization to use for feature selection (the estimation procedure is based on a debiased lasso). The 'auto' option currently uses aic when less than 20% of the possible sample space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF 'auto' WILL CHANGE in a future version to be based on 'num_features' instead of AIC. The aic and bic options use the AIC and BIC rules for regularization. Using 'num_features(int)' selects a fixed number of top features. Passing a float directly sets the alpha parameter of the sklearn.linear_model.Lasso model used for feature selection." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli kernel explainer l1 reg", + "output": "mli kernel explainer l1 reg config.toml: L1 regularization for Kernel Explainer: 'num_features(int)', 'auto' (default for now, but deprecated), 'aic', 'bic', or float. The l1 regularization to use for feature selection (the estimation procedure is based on a debiased lasso). The 'auto' option currently uses aic when less than 20% of the possible sample space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF 'auto' WILL CHANGE in a future version to be based on 'num_features' instead of AIC. The aic and bic options use the AIC and BIC rules for regularization. Using 'num_features(int)' selects a fixed number of top features. Passing a float directly sets the alpha parameter of the sklearn.linear_model.Lasso model used for feature selection." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "L1 regularization for Kernel Explainer: ", + "output": "mli kernel explainer l1 reg config.toml: L1 regularization for Kernel Explainer: 'num_features(int)', 'auto' (default for now, but deprecated), 'aic', 'bic', or float. The l1 regularization to use for feature selection (the estimation procedure is based on a debiased lasso). The 'auto' option currently uses aic when less than 20% of the possible sample space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF 'auto' WILL CHANGE in a future version to be based on 'num_features' instead of AIC. The aic and bic options use the AIC and BIC rules for regularization. Using 'num_features(int)' selects a fixed number of top features. Passing a float directly sets the alpha parameter of the sklearn.linear_model.Lasso model used for feature selection." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_kernel_explainer_l1_reg", + "output": "mli kernel explainer l1 reg config.toml: 'num_features(int)', 'auto' (default for now, but deprecated), 'aic', 'bic', or float. The l1 regularization to use for feature selection (the estimation procedure is based on a debiased lasso). The 'auto' option currently uses aic when less than 20% of the possible sample space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF 'auto' WILL CHANGE in a future version to be based on 'num_features' instead of AIC. The aic and bic options use the AIC and BIC rules for regularization. Using 'num_features(int)' selects a fixed number of top features. 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_kernel_explainer_l1_reg", + "output": "mli kernel explainer l1 reg config.toml: L1 regularization for Kernel Explainer: 'num_features(int)', 'auto' (default for now, but deprecated), 'aic', 'bic', or float. The l1 regularization to use for feature selection (the estimation procedure is based on a debiased lasso). The 'auto' option currently uses aic when less than 20% of the possible sample space is enumerated, otherwise it uses no regularization. THE BEHAVIOR OF 'auto' WILL CHANGE in a future version to be based on 'num_features' instead of AIC. The aic and bic options use the AIC and BIC rules for regularization. Using 'num_features(int)' selects a fixed number of top features. Passing a float directly sets the alpha parameter of the sklearn.linear_model.Lasso model used for feature selection." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_kernel_explainer_max_runtime do? : mli kernel explainer max runtime config.toml: Max runtime for Kernel Explainer in seconds. Default is 900, which equates to 15 minutes. Setting this parameter to -1 means to honor the Kernel Shapley sample size provided regardless of max runtime." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_kernel_explainer_max_runtime. : mli kernel explainer max runtime config.toml: Max runtime for Kernel Explainer in seconds. Default is 900, which equates to 15 minutes. Setting this parameter to -1 means to honor the Kernel Shapley sample size provided regardless of max runtime." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Max runtime for Kernel Explainer in seconds: . 
: Set the mli kernel explainer max runtime config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_kernel_explainer_max_runtime", + "output": "mli kernel explainer max runtime config.toml: Max runtime for Kernel Explainer in seconds. Default is 900, which equates to 15 minutes. Setting this parameter to -1 means to honor the Kernel Shapley sample size provided regardless of max runtime." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_kernel_explainer_max_runtime", + "output": "mli kernel explainer max runtime config.toml: Max runtime for Kernel Explainer in seconds: Max runtime for Kernel Explainer in seconds. Default is 900, which equates to 15 minutes. Setting this parameter to -1 means to honor the Kernel Shapley sample size provided regardless of max runtime." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli kernel explainer max runtime", + "output": "mli kernel explainer max runtime config.toml: Max runtime for Kernel Explainer in seconds: Max runtime for Kernel Explainer in seconds. Default is 900, which equates to 15 minutes. Setting this parameter to -1 means to honor the Kernel Shapley sample size provided regardless of max runtime." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max runtime for Kernel Explainer in seconds: ", + "output": "mli kernel explainer max runtime config.toml: Max runtime for Kernel Explainer in seconds: Max runtime for Kernel Explainer in seconds. Default is 900, which equates to 15 minutes. Setting this parameter to -1 means to honor the Kernel Shapley sample size provided regardless of max runtime." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_kernel_explainer_max_runtime", + "output": "mli kernel explainer max runtime config.toml: Max runtime for Kernel Explainer in seconds. Default is 900, which equates to 15 minutes. Setting this parameter to -1 means to honor the Kernel Shapley sample size provided regardless of max runtime." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_kernel_explainer_max_runtime", + "output": "mli kernel explainer max runtime config.toml: Max runtime for Kernel Explainer in seconds: Max runtime for Kernel Explainer in seconds. Default is 900, which equates to 15 minutes. Setting this parameter to -1 means to honor the Kernel Shapley sample size provided regardless of max runtime." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_tokenizer do? : mli nlp tokenizer config.toml: Tokenizer used to extract tokens from text columns for MLI." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_tokenizer. : mli nlp tokenizer config.toml: Tokenizer used to extract tokens from text columns for MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_tokenizer", + "output": "mli nlp tokenizer config.toml: Tokenizer used to extract tokens from text columns for MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_tokenizer", + "output": "mli nlp tokenizer config.toml: Tokenizer used to extract tokens from text columns for MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp tokenizer", + "output": "mli nlp tokenizer config.toml: Tokenizer used to extract tokens from text columns for MLI." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli nlp tokenizer config.toml: Tokenizer used to extract tokens from text columns for MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_tokenizer", + "output": "mli nlp tokenizer config.toml: Tokenizer used to extract tokens from text columns for MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_tokenizer", + "output": "mli nlp tokenizer config.toml: Tokenizer used to extract tokens from text columns for MLI." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_top_n do? : mli nlp top n config.toml: Number of tokens used for MLI NLP explanations. -1 means all." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_top_n. : mli nlp top n config.toml: Number of tokens used for MLI NLP explanations. -1 means all." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of tokens used for MLI NLP explanations. -1 means all.: . : Set the mli nlp top n config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_top_n", + "output": "mli nlp top n config.toml: Number of tokens used for MLI NLP explanations. -1 means all." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_top_n", + "output": "mli nlp top n config.toml: Number of tokens used for MLI NLP explanations. -1 means all.: Number of tokens used for MLI NLP explanations. -1 means all." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp top n", + "output": "mli nlp top n config.toml: Number of tokens used for MLI NLP explanations. -1 means all.: Number of tokens used for MLI NLP explanations. -1 means all." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of tokens used for MLI NLP explanations. -1 means all.: ", + "output": "mli nlp top n config.toml: Number of tokens used for MLI NLP explanations. -1 means all.: Number of tokens used for MLI NLP explanations. -1 means all." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_top_n", + "output": "mli nlp top n config.toml: Number of tokens used for MLI NLP explanations. -1 means all." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_top_n", + "output": "mli nlp top n config.toml: Number of tokens used for MLI NLP explanations. -1 means all.: Number of tokens used for MLI NLP explanations. -1 means all." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_sample_limit do? : mli nlp sample limit config.toml: Maximum number of records used by MLI NLP explainers" + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_sample_limit. : mli nlp sample limit config.toml: Maximum number of records used by MLI NLP explainers" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Sample size for MLI NLP explainers: . 
: Set the mli nlp sample limit config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_sample_limit", + "output": "mli nlp sample limit config.toml: Maximum number of records used by MLI NLP explainers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_sample_limit", + "output": "mli nlp sample limit config.toml: Sample size for MLI NLP explainers: Maximum number of records used by MLI NLP explainers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp sample limit", + "output": "mli nlp sample limit config.toml: Sample size for MLI NLP explainers: Maximum number of records used by MLI NLP explainers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Sample size for MLI NLP explainers: ", + "output": "mli nlp sample limit config.toml: Sample size for MLI NLP explainers: Maximum number of records used by MLI NLP explainers" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_sample_limit", + "output": "mli nlp sample limit config.toml: Maximum number of records used by MLI NLP explainers" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_sample_limit", + "output": "mli nlp sample limit config.toml: Sample size for MLI NLP explainers: Maximum number of records used by MLI NLP explainers" + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_min_df do? : mli nlp min df config.toml: Minimum number of documents in which token has to appear. Integer means absolute count, float means percentage." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_min_df. 
: mli nlp min df config.toml: Minimum number of documents in which token has to appear. Integer means absolute count, float means percentage." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Minimum number of documents in which token has to appear. Integer means absolute count, float means percentage.: . : Set the mli nlp min df config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_min_df", + "output": "mli nlp min df config.toml: Minimum number of documents in which token has to appear. Integer means absolute count, float means percentage." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_min_df", + "output": "mli nlp min df config.toml: Minimum number of documents in which token has to appear. Integer means absolute count, float means percentage.: Minimum number of documents in which token has to appear. Integer means absolute count, float means percentage." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp min df", + "output": "mli nlp min df config.toml: Minimum number of documents in which token has to appear. Integer means absolute count, float means percentage.: Minimum number of documents in which token has to appear. Integer means absolute count, float means percentage." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Minimum number of documents in which token has to appear. Integer means absolute count, float means percentage.: ", + "output": "mli nlp min df config.toml: Minimum number of documents in which token has to appear. Integer means absolute count, float means percentage.: Minimum number of documents in which token has to appear. Integer means absolute count, float means percentage." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_min_df", + "output": "mli nlp min df config.toml: Minimum number of documents in which token has to appear. Integer means absolute count, float means percentage." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_min_df", + "output": "mli nlp min df config.toml: Minimum number of documents in which token has to appear. Integer means absolute count, float means percentage.: Minimum number of documents in which token has to appear. Integer means absolute count, float means percentage." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_max_df do? : mli nlp max df config.toml: Maximum number of documents in which token has to appear. Integer means absolute count, float means percentage." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_max_df. : mli nlp max df config.toml: Maximum number of documents in which token has to appear. Integer means absolute count, float means percentage." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum number of documents in which token has to appear. Integer means absolute count, float means percentage.: . : Set the mli nlp max df config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_max_df", + "output": "mli nlp max df config.toml: Maximum number of documents in which token has to appear. Integer means absolute count, float means percentage." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_max_df", + "output": "mli nlp max df config.toml: Maximum number of documents in which token has to appear. Integer means absolute count, float means percentage.: Maximum number of documents in which token has to appear. 
Integer means absolute count, float means percentage." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp max df", + "output": "mli nlp max df config.toml: Maximum number of documents in which token has to appear. Integer means absolute count, float means percentage.: Maximum number of documents in which token has to appear. Integer means absolute count, float means percentage." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of documents in which token has to appear. Integer means absolute count, float means percentage.: ", + "output": "mli nlp max df config.toml: Maximum number of documents in which token has to appear. Integer means absolute count, float means percentage.: Maximum number of documents in which token has to appear. Integer means absolute count, float means percentage." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_max_df", + "output": "mli nlp max df config.toml: Maximum number of documents in which token has to appear. Integer means absolute count, float means percentage." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_max_df", + "output": "mli nlp max df config.toml: Maximum number of documents in which token has to appear. Integer means absolute count, float means percentage.: Maximum number of documents in which token has to appear. Integer means absolute count, float means percentage." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_min_ngram do? : mli nlp min ngram config.toml: The minimum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_min_ngram. 
: mli nlp min ngram config.toml: The minimum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: The minimum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range.: . : Set the mli nlp min ngram config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_min_ngram", + "output": "mli nlp min ngram config.toml: The minimum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_min_ngram", + "output": "mli nlp min ngram config.toml: The minimum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range.: The minimum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp min ngram", + "output": "mli nlp min ngram config.toml: The minimum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range.: The minimum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "The minimum value in the ngram range. 
The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range.: ", + "output": "mli nlp min ngram config.toml: The minimum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range.: The minimum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_min_ngram", + "output": "mli nlp min ngram config.toml: The minimum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_min_ngram", + "output": "mli nlp min ngram config.toml: The minimum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range.: The minimum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_max_ngram do? : mli nlp max ngram config.toml: The maximum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_max_ngram. : mli nlp max ngram config.toml: The maximum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: The maximum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range.: . 
: Set the mli nlp max ngram config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_max_ngram", + "output": "mli nlp max ngram config.toml: The maximum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_max_ngram", + "output": "mli nlp max ngram config.toml: The maximum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range.: The maximum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp max ngram", + "output": "mli nlp max ngram config.toml: The maximum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range.: The maximum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "The maximum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range.: ", + "output": "mli nlp max ngram config.toml: The maximum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range.: The maximum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_max_ngram", + "output": "mli nlp max ngram config.toml: The maximum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_max_ngram", + "output": "mli nlp max ngram config.toml: The maximum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range.: The maximum value in the ngram range. The tokenizer will generate all possible tokens in the (mli_nlp_min_ngram, mli_nlp_max_ngram) range." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_min_token_mode do? : mli nlp min token mode config.toml: Mode used to choose N tokens for MLI NLP.\"top\" chooses N top tokens.\"bottom\" chooses N bottom tokens.\"top-bottom\" chooses math.floor(N/2) top and math.ceil(N/2) bottom tokens.\"linspace\" chooses N evenly spaced out tokens." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_min_token_mode. : mli nlp min token mode config.toml: Mode used to choose N tokens for MLI NLP.\"top\" chooses N top tokens.\"bottom\" chooses N bottom tokens.\"top-bottom\" chooses math.floor(N/2) top and math.ceil(N/2) bottom tokens.\"linspace\" chooses N evenly spaced out tokens." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Mode used to choose N tokens for MLI NLP.\"top\" chooses N top tokens.\"bottom\" chooses N bottom tokens.\"top-bottom\" chooses math.floor(N/2) top and math.ceil(N/2) bottom tokens.\"linspace\" chooses N evenly spaced out tokens.: . 
: Set the mli nlp min token mode config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_min_token_mode", + "output": "mli nlp min token mode config.toml: Mode used to choose N tokens for MLI NLP.\"top\" chooses N top tokens.\"bottom\" chooses N bottom tokens.\"top-bottom\" chooses math.floor(N/2) top and math.ceil(N/2) bottom tokens.\"linspace\" chooses N evenly spaced out tokens." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_min_token_mode", + "output": "mli nlp min token mode config.toml: Mode used to choose N tokens for MLI NLP.\"top\" chooses N top tokens.\"bottom\" chooses N bottom tokens.\"top-bottom\" chooses math.floor(N/2) top and math.ceil(N/2) bottom tokens.\"linspace\" chooses N evenly spaced out tokens.: Mode used to choose N tokens for MLI NLP.\"top\" chooses N top tokens.\"bottom\" chooses N bottom tokens.\"top-bottom\" chooses math.floor(N/2) top and math.ceil(N/2) bottom tokens.\"linspace\" chooses N evenly spaced out tokens." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp min token mode", + "output": "mli nlp min token mode config.toml: Mode used to choose N tokens for MLI NLP.\"top\" chooses N top tokens.\"bottom\" chooses N bottom tokens.\"top-bottom\" chooses math.floor(N/2) top and math.ceil(N/2) bottom tokens.\"linspace\" chooses N evenly spaced out tokens.: Mode used to choose N tokens for MLI NLP.\"top\" chooses N top tokens.\"bottom\" chooses N bottom tokens.\"top-bottom\" chooses math.floor(N/2) top and math.ceil(N/2) bottom tokens.\"linspace\" chooses N evenly spaced out tokens." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Mode used to choose N tokens for MLI NLP.\n\"top\" chooses N top tokens.\n\"bottom\" chooses N bottom tokens.\n\"top-bottom\" chooses math.floor(N/2) top and math.ceil(N/2) bottom tokens.\n\"linspace\" chooses N evenly spaced out tokens.: ", + "output": "mli nlp min token mode config.toml: Mode used to choose N tokens for MLI NLP.\"top\" chooses N top tokens.\"bottom\" chooses N bottom tokens.\"top-bottom\" chooses math.floor(N/2) top and math.ceil(N/2) bottom tokens.\"linspace\" chooses N evenly spaced out tokens.: Mode used to choose N tokens for MLI NLP.\"top\" chooses N top tokens.\"bottom\" chooses N bottom tokens.\"top-bottom\" chooses math.floor(N/2) top and math.ceil(N/2) bottom tokens.\"linspace\" chooses N evenly spaced out tokens." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_min_token_mode", + "output": "mli nlp min token mode config.toml: Mode used to choose N tokens for MLI NLP.\"top\" chooses N top tokens.\"bottom\" chooses N bottom tokens.\"top-bottom\" chooses math.floor(N/2) top and math.ceil(N/2) bottom tokens.\"linspace\" chooses N evenly spaced out tokens." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_min_token_mode", + "output": "mli nlp min token mode config.toml: Mode used to choose N tokens for MLI NLP.\"top\" chooses N top tokens.\"bottom\" chooses N bottom tokens.\"top-bottom\" chooses math.floor(N/2) top and math.ceil(N/2) bottom tokens.\"linspace\" chooses N evenly spaced out tokens.: Mode used to choose N tokens for MLI NLP.\"top\" chooses N top tokens.\"bottom\" chooses N bottom tokens.\"top-bottom\" chooses math.floor(N/2) top and math.ceil(N/2) bottom tokens.\"linspace\" chooses N evenly spaced out tokens." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_tokenizer_max_features do? : mli nlp tokenizer max features config.toml: The number of top tokens to be used as features when building token based feature importance." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_tokenizer_max_features. : mli nlp tokenizer max features config.toml: The number of top tokens to be used as features when building token based feature importance." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: The number of top tokens to be used as features when building token based feature importance.: . : Set the mli nlp tokenizer max features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_tokenizer_max_features", + "output": "mli nlp tokenizer max features config.toml: The number of top tokens to be used as features when building token based feature importance." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_tokenizer_max_features", + "output": "mli nlp tokenizer max features config.toml: The number of top tokens to be used as features when building token based feature importance.: The number of top tokens to be used as features when building token based feature importance." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp tokenizer max features", + "output": "mli nlp tokenizer max features config.toml: The number of top tokens to be used as features when building token based feature importance.: The number of top tokens to be used as features when building token based feature importance." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "The number of top tokens to be used as features when building token based feature importance.: ", + "output": "mli nlp tokenizer max features config.toml: The number of top tokens to be used as features when building token based feature importance.: The number of top tokens to be used as features when building token based feature importance." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_tokenizer_max_features", + "output": "mli nlp tokenizer max features config.toml: The number of top tokens to be used as features when building token based feature importance." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_tokenizer_max_features", + "output": "mli nlp tokenizer max features config.toml: The number of top tokens to be used as features when building token based feature importance.: The number of top tokens to be used as features when building token based feature importance." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_loco_max_features do? : mli nlp loco max features config.toml: The number of top tokens to be used as features when computing text LOCO." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_loco_max_features. : mli nlp loco max features config.toml: The number of top tokens to be used as features when computing text LOCO." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: The number of top tokens to be used as features when computing text LOCO.: . 
: Set the mli nlp loco max features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_loco_max_features", + "output": "mli nlp loco max features config.toml: The number of top tokens to be used as features when computing text LOCO." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_loco_max_features", + "output": "mli nlp loco max features config.toml: The number of top tokens to be used as features when computing text LOCO.: The number of top tokens to be used as features when computing text LOCO." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp loco max features", + "output": "mli nlp loco max features config.toml: The number of top tokens to be used as features when computing text LOCO.: The number of top tokens to be used as features when computing text LOCO." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "The number of top tokens to be used as features when computing text LOCO.: ", + "output": "mli nlp loco max features config.toml: The number of top tokens to be used as features when computing text LOCO.: The number of top tokens to be used as features when computing text LOCO." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_loco_max_features", + "output": "mli nlp loco max features config.toml: The number of top tokens to be used as features when computing text LOCO." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_loco_max_features", + "output": "mli nlp loco max features config.toml: The number of top tokens to be used as features when computing text LOCO.: The number of top tokens to be used as features when computing text LOCO." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_surrogate_tokenizer do? : mli nlp surrogate tokenizer config.toml: The tokenizer method to use when tokenizing a dataset for surrogate models. Can either choose 'TF-IDF' or 'Linear Model + TF-IDF', which first runs TF-IDF to get tokens and then fits a linear model between the tokens and the target to get importances of tokens, which are based on coefficients of the linear model. Default is 'Linear Model + TF-IDF'. Only applies to NLP models." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_surrogate_tokenizer. : mli nlp surrogate tokenizer config.toml: The tokenizer method to use when tokenizing a dataset for surrogate models. Can either choose 'TF-IDF' or 'Linear Model + TF-IDF', which first runs TF-IDF to get tokens and then fits a linear model between the tokens and the target to get importances of tokens, which are based on coefficients of the linear model. Default is 'Linear Model + TF-IDF'. Only applies to NLP models." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Tokenizer for surrogate models. Only applies to NLP models.: . : Set the mli nlp surrogate tokenizer config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_surrogate_tokenizer", + "output": "mli nlp surrogate tokenizer config.toml: The tokenizer method to use when tokenizing a dataset for surrogate models. 
Can either choose 'TF-IDF' or 'Linear Model + TF-IDF', which first runs TF-IDF to get tokens and then fits a linear model between the tokens and the target to get importances of tokens, which are based on coefficients of the linear model. Default is 'Linear Model + TF-IDF'. Only applies to NLP models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_surrogate_tokenizer", + "output": "mli nlp surrogate tokenizer config.toml: Tokenizer for surrogate models. Only applies to NLP models.: The tokenizer method to use when tokenizing a dataset for surrogate models. Can either choose 'TF-IDF' or 'Linear Model + TF-IDF', which first runs TF-IDF to get tokens and then fits a linear model between the tokens and the target to get importances of tokens, which are based on coefficients of the linear model. Default is 'Linear Model + TF-IDF'. Only applies to NLP models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp surrogate tokenizer", + "output": "mli nlp surrogate tokenizer config.toml: Tokenizer for surrogate models. Only applies to NLP models.: The tokenizer method to use when tokenizing a dataset for surrogate models. Can either choose 'TF-IDF' or 'Linear Model + TF-IDF', which first runs TF-IDF to get tokens and then fits a linear model between the tokens and the target to get importances of tokens, which are based on coefficients of the linear model. Default is 'Linear Model + TF-IDF'. Only applies to NLP models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Tokenizer for surrogate models. Only applies to NLP models.: ", + "output": "mli nlp surrogate tokenizer config.toml: Tokenizer for surrogate models. Only applies to NLP models.: The tokenizer method to use when tokenizing a dataset for surrogate models. 
Can either choose 'TF-IDF' or 'Linear Model + TF-IDF', which first runs TF-IDF to get tokens and then fits a linear model between the tokens and the target to get importances of tokens, which are based on coefficients of the linear model. Default is 'Linear Model + TF-IDF'. Only applies to NLP models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_surrogate_tokenizer", + "output": "mli nlp surrogate tokenizer config.toml: The tokenizer method to use when tokenizing a dataset for surrogate models. Can either choose 'TF-IDF' or 'Linear Model + TF-IDF', which first runs TF-IDF to get tokens and then fits a linear model between the tokens and the target to get importances of tokens, which are based on coefficients of the linear model. Default is 'Linear Model + TF-IDF'. Only applies to NLP models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_surrogate_tokenizer", + "output": "mli nlp surrogate tokenizer config.toml: Tokenizer for surrogate models. Only applies to NLP models.: The tokenizer method to use when tokenizing a dataset for surrogate models. Can either choose 'TF-IDF' or 'Linear Model + TF-IDF', which first runs TF-IDF to get tokens and then fits a linear model between the tokens and the target to get importances of tokens, which are based on coefficients of the linear model. Default is 'Linear Model + TF-IDF'. Only applies to NLP models." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_surrogate_tokens do? : mli nlp surrogate tokens config.toml: The number of top tokens to be used as features when building surrogate models. Only applies to NLP models." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_surrogate_tokens. : mli nlp surrogate tokens config.toml: The number of top tokens to be used as features when building surrogate models. Only applies to NLP models." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: The number of top tokens to be used as features when building surrogate models. Only applies to NLP models.: . : Set the mli nlp surrogate tokens config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_surrogate_tokens", + "output": "mli nlp surrogate tokens config.toml: The number of top tokens to be used as features when building surrogate models. Only applies to NLP models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_surrogate_tokens", + "output": "mli nlp surrogate tokens config.toml: The number of top tokens to be used as features when building surrogate models. Only applies to NLP models.: The number of top tokens to be used as features when building surrogate models. Only applies to NLP models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp surrogate tokens", + "output": "mli nlp surrogate tokens config.toml: The number of top tokens to be used as features when building surrogate models. Only applies to NLP models.: The number of top tokens to be used as features when building surrogate models. Only applies to NLP models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "The number of top tokens to be used as features when building surrogate models. Only applies to NLP models.: ", + "output": "mli nlp surrogate tokens config.toml: The number of top tokens to be used as features when building surrogate models. Only applies to NLP models.: The number of top tokens to be used as features when building surrogate models. Only applies to NLP models." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_surrogate_tokens", + "output": "mli nlp surrogate tokens config.toml: The number of top tokens to be used as features when building surrogate models. Only applies to NLP models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_surrogate_tokens", + "output": "mli nlp surrogate tokens config.toml: The number of top tokens to be used as features when building surrogate models. Only applies to NLP models.: The number of top tokens to be used as features when building surrogate models. Only applies to NLP models." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_use_stop_words do? : mli nlp use stop words config.toml: Ignore stop words for MLI NLP." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_use_stop_words. : mli nlp use stop words config.toml: Ignore stop words for MLI NLP." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Ignore stop words for MLI NLP.: . : Set the mli nlp use stop words config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_use_stop_words", + "output": "mli nlp use stop words config.toml: Ignore stop words for MLI NLP." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_use_stop_words", + "output": "mli nlp use stop words config.toml: Ignore stop words for MLI NLP.: Ignore stop words for MLI NLP." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp use stop words", + "output": "mli nlp use stop words config.toml: Ignore stop words for MLI NLP.: Ignore stop words for MLI NLP." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Ignore stop words for MLI NLP.: ", + "output": "mli nlp use stop words config.toml: Ignore stop words for MLI NLP.: Ignore stop words for MLI NLP." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_use_stop_words", + "output": "mli nlp use stop words config.toml: Ignore stop words for MLI NLP." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_use_stop_words", + "output": "mli nlp use stop words config.toml: Ignore stop words for MLI NLP.: Ignore stop words for MLI NLP." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_stop_words do? : mli nlp stop words config.toml: List of words to filter out before generation of text tokens, which are passed to MLI NLP LOCO and surrogate models (if enabled). Default is 'english'. Pass in custom stop-words as a list, e.g., ['great', 'good']." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_stop_words. : mli nlp stop words config.toml: List of words to filter out before generation of text tokens, which are passed to MLI NLP LOCO and surrogate models (if enabled). Default is 'english'. Pass in custom stop-words as a list, e.g., ['great', 'good']." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: List of words to filter out before generation of text tokens, which are passed to MLI NLP LOCO and surrogate models (if enabled). Default is 'english'. Pass in custom stop-words as a list, e.g., ['great', 'good']: . 
: Set the mli nlp stop words config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_stop_words", + "output": "mli nlp stop words config.toml: List of words to filter out before generation of text tokens, which are passed to MLI NLP LOCO and surrogate models (if enabled). Default is 'english'. Pass in custom stop-words as a list, e.g., ['great', 'good']." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_stop_words", + "output": "mli nlp stop words config.toml: List of words to filter out before generation of text tokens, which are passed to MLI NLP LOCO and surrogate models (if enabled). Default is 'english'. Pass in custom stop-words as a list, e.g., ['great', 'good']: List of words to filter out before generation of text tokens, which are passed to MLI NLP LOCO and surrogate models (if enabled). Default is 'english'. Pass in custom stop-words as a list, e.g., ['great', 'good']." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp stop words", + "output": "mli nlp stop words config.toml: List of words to filter out before generation of text tokens, which are passed to MLI NLP LOCO and surrogate models (if enabled). Default is 'english'. Pass in custom stop-words as a list, e.g., ['great', 'good']: List of words to filter out before generation of text tokens, which are passed to MLI NLP LOCO and surrogate models (if enabled). Default is 'english'. Pass in custom stop-words as a list, e.g., ['great', 'good']." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "List of words to filter out before generation of text tokens, which are passed to MLI NLP LOCO and surrogate models (if enabled). Default is 'english'. 
Pass in custom stop-words as a list, e.g., ['great', 'good']: ", + "output": "mli nlp stop words config.toml: List of words to filter out before generation of text tokens, which are passed to MLI NLP LOCO and surrogate models (if enabled). Default is 'english'. Pass in custom stop-words as a list, e.g., ['great', 'good']: List of words to filter out before generation of text tokens, which are passed to MLI NLP LOCO and surrogate models (if enabled). Default is 'english'. Pass in custom stop-words as a list, e.g., ['great', 'good']." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_stop_words", + "output": "mli nlp stop words config.toml: List of words to filter out before generation of text tokens, which are passed to MLI NLP LOCO and surrogate models (if enabled). Default is 'english'. Pass in custom stop-words as a list, e.g., ['great', 'good']." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_stop_words", + "output": "mli nlp stop words config.toml: List of words to filter out before generation of text tokens, which are passed to MLI NLP LOCO and surrogate models (if enabled). Default is 'english'. Pass in custom stop-words as a list, e.g., ['great', 'good']: List of words to filter out before generation of text tokens, which are passed to MLI NLP LOCO and surrogate models (if enabled). Default is 'english'. Pass in custom stop-words as a list, e.g., ['great', 'good']." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_append_to_english_stop_words do? : mli nlp append to english stop words config.toml: Append passed in list of custom stop words to default 'english' stop words." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_append_to_english_stop_words. : mli nlp append to english stop words config.toml: Append passed in list of custom stop words to default 'english' stop words." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Append passed in list of custom stop words to default 'english' stop words: . : Set the mli nlp append to english stop words config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_append_to_english_stop_words", + "output": "mli nlp append to english stop words config.toml: Append passed in list of custom stop words to default 'english' stop words." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_append_to_english_stop_words", + "output": "mli nlp append to english stop words config.toml: Append passed in list of custom stop words to default 'english' stop words: Append passed in list of custom stop words to default 'english' stop words." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp append to english stop words", + "output": "mli nlp append to english stop words config.toml: Append passed in list of custom stop words to default 'english' stop words: Append passed in list of custom stop words to default 'english' stop words." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Append passed in list of custom stop words to default 'english' stop words: ", + "output": "mli nlp append to english stop words config.toml: Append passed in list of custom stop words to default 'english' stop words: Append passed in list of custom stop words to default 'english' stop words." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_append_to_english_stop_words", + "output": "mli nlp append to english stop words config.toml: Append passed in list of custom stop words to default 'english' stop words." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_append_to_english_stop_words", + "output": "mli nlp append to english stop words config.toml: Append passed in list of custom stop words to default 'english' stop words: Append passed in list of custom stop words to default 'english' stop words." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_image_enable do? : mli image enable config.toml: Enable MLI for image experiments." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_image_enable. : mli image enable config.toml: Enable MLI for image experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_image_enable", + "output": "mli image enable config.toml: Enable MLI for image experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_image_enable", + "output": "mli image enable config.toml: Enable MLI for image experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli image enable", + "output": "mli image enable config.toml: Enable MLI for image experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mli image enable config.toml: Enable MLI for image experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_image_enable", + "output": "mli image enable config.toml: Enable MLI for image experiments." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_image_enable", + "output": "mli image enable config.toml: Enable MLI for image experiments." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does mli_max_explain_rows do? : mli max explain rows config.toml: The maximum number of rows allowed to get the local explanation result, increase the value may jeopardize overall performance, change the value only if necessary." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_max_explain_rows. : mli max explain rows config.toml: The maximum number of rows allowed to get the local explanation result, increase the value may jeopardize overall performance, change the value only if necessary." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: The maximum number of rows allowed to get the local explanation result.: . : Set the mli max explain rows config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_max_explain_rows", + "output": "mli max explain rows config.toml: The maximum number of rows allowed to get the local explanation result, increase the value may jeopardize overall performance, change the value only if necessary." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_max_explain_rows", + "output": "mli max explain rows config.toml: The maximum number of rows allowed to get the local explanation result.: The maximum number of rows allowed to get the local explanation result, increase the value may jeopardize overall performance, change the value only if necessary." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli max explain rows", + "output": "mli max explain rows config.toml: The maximum number of rows allowed to get the local explanation result.: The maximum number of rows allowed to get the local explanation result, increase the value may jeopardize overall performance, change the value only if necessary." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "The maximum number of rows allowed to get the local explanation result.: ", + "output": "mli max explain rows config.toml: The maximum number of rows allowed to get the local explanation result.: The maximum number of rows allowed to get the local explanation result, increase the value may jeopardize overall performance, change the value only if necessary." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_max_explain_rows", + "output": "mli max explain rows config.toml: The maximum number of rows allowed to get the local explanation result, increase the value may jeopardize overall performance, change the value only if necessary." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_max_explain_rows", + "output": "mli max explain rows config.toml: The maximum number of rows allowed to get the local explanation result.: The maximum number of rows allowed to get the local explanation result, increase the value may jeopardize overall performance, change the value only if necessary." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_max_tokens_rows do? : mli nlp max tokens rows config.toml: The maximum number of rows allowed to get the NLP token importance result, increasing the value may consume too much memory and negatively impact the performance, change the value only if necessary." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_max_tokens_rows. : mli nlp max tokens rows config.toml: The maximum number of rows allowed to get the NLP token importance result, increasing the value may consume too much memory and negatively impact the performance, change the value only if necessary." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: The maximum number of rows allowed to get the NLP token importance result.: . : Set the mli nlp max tokens rows config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_max_tokens_rows", + "output": "mli nlp max tokens rows config.toml: The maximum number of rows allowed to get the NLP token importance result, increasing the value may consume too much memory and negatively impact the performance, change the value only if necessary." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_max_tokens_rows", + "output": "mli nlp max tokens rows config.toml: The maximum number of rows allowed to get the NLP token importance result.: The maximum number of rows allowed to get the NLP token importance result, increasing the value may consume too much memory and negatively impact the performance, change the value only if necessary." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp max tokens rows", + "output": "mli nlp max tokens rows config.toml: The maximum number of rows allowed to get the NLP token importance result.: The maximum number of rows allowed to get the NLP token importance result, increasing the value may consume too much memory and negatively impact the performance, change the value only if necessary." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "The maximum number of rows allowed to get the NLP token importance result.: ", + "output": "mli nlp max tokens rows config.toml: The maximum number of rows allowed to get the NLP token importance result.: The maximum number of rows allowed to get the NLP token importance result, increasing the value may consume too much memory and negatively impact the performance, change the value only if necessary." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_max_tokens_rows", + "output": "mli nlp max tokens rows config.toml: The maximum number of rows allowed to get the NLP token importance result, increasing the value may consume too much memory and negatively impact the performance, change the value only if necessary." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_max_tokens_rows", + "output": "mli nlp max tokens rows config.toml: The maximum number of rows allowed to get the NLP token importance result.: The maximum number of rows allowed to get the NLP token importance result, increasing the value may consume too much memory and negatively impact the performance, change the value only if necessary." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_nlp_min_parallel_rows do? : mli nlp min parallel rows config.toml: The minimum number of rows to enable parallel execution for NLP local explanations calculation." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_nlp_min_parallel_rows. : mli nlp min parallel rows config.toml: The minimum number of rows to enable parallel execution for NLP local explanations calculation." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: The minimum number of rows to enable parallel execution for NLP local explanations calculation.: . : Set the mli nlp min parallel rows config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_min_parallel_rows", + "output": "mli nlp min parallel rows config.toml: The minimum number of rows to enable parallel execution for NLP local explanations calculation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_nlp_min_parallel_rows", + "output": "mli nlp min parallel rows config.toml: The minimum number of rows to enable parallel execution for NLP local explanations calculation.: The minimum number of rows to enable parallel execution for NLP local explanations calculation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli nlp min parallel rows", + "output": "mli nlp min parallel rows config.toml: The minimum number of rows to enable parallel execution for NLP local explanations calculation.: The minimum number of rows to enable parallel execution for NLP local explanations calculation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "The minimum number of rows to enable parallel execution for NLP local explanations calculation.: ", + "output": "mli nlp min parallel rows config.toml: The minimum number of rows to enable parallel execution for NLP local explanations calculation.: The minimum number of rows to enable parallel execution for NLP local explanations calculation." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_nlp_min_parallel_rows", + "output": "mli nlp min parallel rows config.toml: The minimum number of rows to enable parallel execution for NLP local explanations calculation." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_nlp_min_parallel_rows", + "output": "mli nlp min parallel rows config.toml: The minimum number of rows to enable parallel execution for NLP local explanations calculation.: The minimum number of rows to enable parallel execution for NLP local explanations calculation." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_run_legacy_defaults do? : mli run legacy defaults config.toml: Run legacy defaults in addition to current default explainers in MLI." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_run_legacy_defaults. : mli run legacy defaults config.toml: Run legacy defaults in addition to current default explainers in MLI." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Run legacy defaults in addition to current default explainers in MLI.: . : Set the mli run legacy defaults config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_run_legacy_defaults", + "output": "mli run legacy defaults config.toml: Run legacy defaults in addition to current default explainers in MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_run_legacy_defaults", + "output": "mli run legacy defaults config.toml: Run legacy defaults in addition to current default explainers in MLI.: Run legacy defaults in addition to current default explainers in MLI." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli run legacy defaults", + "output": "mli run legacy defaults config.toml: Run legacy defaults in addition to current default explainers in MLI.: Run legacy defaults in addition to current default explainers in MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Run legacy defaults in addition to current default explainers in MLI.: ", + "output": "mli run legacy defaults config.toml: Run legacy defaults in addition to current default explainers in MLI.: Run legacy defaults in addition to current default explainers in MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_run_legacy_defaults", + "output": "mli run legacy defaults config.toml: Run legacy defaults in addition to current default explainers in MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_run_legacy_defaults", + "output": "mli run legacy defaults config.toml: Run legacy defaults in addition to current default explainers in MLI.: Run legacy defaults in addition to current default explainers in MLI." + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_cuda_cluster_kwargs do? : dask cuda cluster kwargs config.toml: Set dask CUDA/RAPIDS cluster settings for single node workers. Additional environment variables can be set, see: https://dask-cuda.readthedocs.io/en/latest/ucx.html#dask-scheduler e.g. 
for ucx use: {} dict version of: dict(n_workers=None, threads_per_worker=1, processes=True, memory_limit='auto', device_memory_limit=None, CUDA_VISIBLE_DEVICES=None, data=None, local_directory=None, protocol='ucx', enable_tcp_over_ucx=True, enable_infiniband=False, enable_nvlink=False, enable_rdmacm=False, ucx_net_devices='auto', rmm_pool_size='1GB') WARNING: Do not add arguments like {'n_workers': 1, 'processes': True, 'threads_per_worker': 1} this will lead to hangs, cuda cluster handles this itself. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_cuda_cluster_kwargs. : dask cuda cluster kwargs config.toml: Set dask CUDA/RAPIDS cluster settings for single node workers. Additional environment variables can be set, see: https://dask-cuda.readthedocs.io/en/latest/ucx.html#dask-scheduler e.g. for ucx use: {} dict version of: dict(n_workers=None, threads_per_worker=1, processes=True, memory_limit='auto', device_memory_limit=None, CUDA_VISIBLE_DEVICES=None, data=None, local_directory=None, protocol='ucx', enable_tcp_over_ucx=True, enable_infiniband=False, enable_nvlink=False, enable_rdmacm=False, ucx_net_devices='auto', rmm_pool_size='1GB') WARNING: Do not add arguments like {'n_workers': 1, 'processes': True, 'threads_per_worker': 1} this will lead to hangs, cuda cluster handles this itself. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Set dask CUDA/RAPIDS cluster settings for single node workers.: . : Set the dask cuda cluster kwargs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_cluster_kwargs", + "output": "dask cuda cluster kwargs config.toml: Set dask CUDA/RAPIDS cluster settings for single node workers. Additional environment variables can be set, see: https://dask-cuda.readthedocs.io/en/latest/ucx.html#dask-scheduler e.g. 
for ucx use: {} dict version of: dict(n_workers=None, threads_per_worker=1, processes=True, memory_limit='auto', device_memory_limit=None, CUDA_VISIBLE_DEVICES=None, data=None, local_directory=None, protocol='ucx', enable_tcp_over_ucx=True, enable_infiniband=False, enable_nvlink=False, enable_rdmacm=False, ucx_net_devices='auto', rmm_pool_size='1GB') WARNING: Do not add arguments like {'n_workers': 1, 'processes': True, 'threads_per_worker': 1} this will lead to hangs, cuda cluster handles this itself. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_cluster_kwargs", + "output": "dask cuda cluster kwargs config.toml: Set dask CUDA/RAPIDS cluster settings for single node workers.: Set dask CUDA/RAPIDS cluster settings for single node workers. Additional environment variables can be set, see: https://dask-cuda.readthedocs.io/en/latest/ucx.html#dask-scheduler e.g. for ucx use: {} dict version of: dict(n_workers=None, threads_per_worker=1, processes=True, memory_limit='auto', device_memory_limit=None, CUDA_VISIBLE_DEVICES=None, data=None, local_directory=None, protocol='ucx', enable_tcp_over_ucx=True, enable_infiniband=False, enable_nvlink=False, enable_rdmacm=False, ucx_net_devices='auto', rmm_pool_size='1GB') WARNING: Do not add arguments like {'n_workers': 1, 'processes': True, 'threads_per_worker': 1} this will lead to hangs, cuda cluster handles this itself. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask cuda cluster kwargs", + "output": "dask cuda cluster kwargs config.toml: Set dask CUDA/RAPIDS cluster settings for single node workers.: Set dask CUDA/RAPIDS cluster settings for single node workers. Additional environment variables can be set, see: https://dask-cuda.readthedocs.io/en/latest/ucx.html#dask-scheduler e.g. 
for ucx use: {} dict version of: dict(n_workers=None, threads_per_worker=1, processes=True, memory_limit='auto', device_memory_limit=None, CUDA_VISIBLE_DEVICES=None, data=None, local_directory=None, protocol='ucx', enable_tcp_over_ucx=True, enable_infiniband=False, enable_nvlink=False, enable_rdmacm=False, ucx_net_devices='auto', rmm_pool_size='1GB') WARNING: Do not add arguments like {'n_workers': 1, 'processes': True, 'threads_per_worker': 1} this will lead to hangs, cuda cluster handles this itself. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Set dask CUDA/RAPIDS cluster settings for single node workers.: ", + "output": "dask cuda cluster kwargs config.toml: Set dask CUDA/RAPIDS cluster settings for single node workers.: Set dask CUDA/RAPIDS cluster settings for single node workers. Additional environment variables can be set, see: https://dask-cuda.readthedocs.io/en/latest/ucx.html#dask-scheduler e.g. for ucx use: {} dict version of: dict(n_workers=None, threads_per_worker=1, processes=True, memory_limit='auto', device_memory_limit=None, CUDA_VISIBLE_DEVICES=None, data=None, local_directory=None, protocol='ucx', enable_tcp_over_ucx=True, enable_infiniband=False, enable_nvlink=False, enable_rdmacm=False, ucx_net_devices='auto', rmm_pool_size='1GB') WARNING: Do not add arguments like {'n_workers': 1, 'processes': True, 'threads_per_worker': 1} this will lead to hangs, cuda cluster handles this itself. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_cuda_cluster_kwargs", + "output": "dask cuda cluster kwargs config.toml: Set dask CUDA/RAPIDS cluster settings for single node workers. Additional environment variables can be set, see: https://dask-cuda.readthedocs.io/en/latest/ucx.html#dask-scheduler e.g. 
for ucx use: {} dict version of: dict(n_workers=None, threads_per_worker=1, processes=True, memory_limit='auto', device_memory_limit=None, CUDA_VISIBLE_DEVICES=None, data=None, local_directory=None, protocol='ucx', enable_tcp_over_ucx=True, enable_infiniband=False, enable_nvlink=False, enable_rdmacm=False, ucx_net_devices='auto', rmm_pool_size='1GB') WARNING: Do not add arguments like {'n_workers': 1, 'processes': True, 'threads_per_worker': 1} this will lead to hangs, cuda cluster handles this itself. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_cuda_cluster_kwargs", + "output": "dask cuda cluster kwargs config.toml: Set dask CUDA/RAPIDS cluster settings for single node workers.: Set dask CUDA/RAPIDS cluster settings for single node workers. Additional environment variables can be set, see: https://dask-cuda.readthedocs.io/en/latest/ucx.html#dask-scheduler e.g. for ucx use: {} dict version of: dict(n_workers=None, threads_per_worker=1, processes=True, memory_limit='auto', device_memory_limit=None, CUDA_VISIBLE_DEVICES=None, data=None, local_directory=None, protocol='ucx', enable_tcp_over_ucx=True, enable_infiniband=False, enable_nvlink=False, enable_rdmacm=False, ucx_net_devices='auto', rmm_pool_size='1GB') WARNING: Do not add arguments like {'n_workers': 1, 'processes': True, 'threads_per_worker': 1} this will lead to hangs, cuda cluster handles this itself. " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_cluster_kwargs do? : dask cluster kwargs config.toml: Set dask cluster settings for single node workers. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_cluster_kwargs. : dask cluster kwargs config.toml: Set dask cluster settings for single node workers. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Set dask cluster settings for single node workers.: . 
: Set the dask cluster kwargs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cluster_kwargs", + "output": "dask cluster kwargs config.toml: Set dask cluster settings for single node workers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cluster_kwargs", + "output": "dask cluster kwargs config.toml: Set dask cluster settings for single node workers.: Set dask cluster settings for single node workers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask cluster kwargs", + "output": "dask cluster kwargs config.toml: Set dask cluster settings for single node workers.: Set dask cluster settings for single node workers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Set dask cluster settings for single node workers.: ", + "output": "dask cluster kwargs config.toml: Set dask cluster settings for single node workers.: Set dask cluster settings for single node workers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_cluster_kwargs", + "output": "dask cluster kwargs config.toml: Set dask cluster settings for single node workers. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_cluster_kwargs", + "output": "dask cluster kwargs config.toml: Set dask cluster settings for single node workers.: Set dask cluster settings for single node workers. " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_dask_cluster do? : enable dask cluster config.toml: Whether to enable dask scheduler DAI server node and dask workers on DAI worker nodes. 
" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_dask_cluster. : enable dask cluster config.toml: Whether to enable dask scheduler DAI server node and dask workers on DAI worker nodes. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable dask scheduler and worker on singlenode/multinode setup: . : Set the enable dask cluster config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_dask_cluster", + "output": "enable dask cluster config.toml: Whether to enable dask scheduler DAI server node and dask workers on DAI worker nodes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_dask_cluster", + "output": "enable dask cluster config.toml: Enable dask scheduler and worker on singlenode/multinode setup: Whether to enable dask scheduler DAI server node and dask workers on DAI worker nodes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable dask cluster", + "output": "enable dask cluster config.toml: Enable dask scheduler and worker on singlenode/multinode setup: Whether to enable dask scheduler DAI server node and dask workers on DAI worker nodes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable dask scheduler and worker on singlenode/multinode setup: ", + "output": "enable dask cluster config.toml: Enable dask scheduler and worker on singlenode/multinode setup: Whether to enable dask scheduler DAI server node and dask workers on DAI worker nodes. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_dask_cluster", + "output": "enable dask cluster config.toml: Whether to enable dask scheduler DAI server node and dask workers on DAI worker nodes. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_dask_cluster", + "output": "enable dask cluster config.toml: Enable dask scheduler and worker on singlenode/multinode setup: Whether to enable dask scheduler DAI server node and dask workers on DAI worker nodes. " + }, + { + "prompt_type": "plain", + "instruction": ": What does start_dask_worker do? : start dask worker config.toml: Whether to start dask workers on this multinode worker. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain start_dask_worker. : start dask worker config.toml: Whether to start dask workers on this multinode worker. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Start dask workers for given multinode worker: . : Set the start dask worker config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "start_dask_worker", + "output": "start dask worker config.toml: Whether to start dask workers on this multinode worker. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "start_dask_worker", + "output": "start dask worker config.toml: Start dask workers for given multinode worker: Whether to start dask workers on this multinode worker. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "start dask worker", + "output": "start dask worker config.toml: Start dask workers for given multinode worker: Whether to start dask workers on this multinode worker. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Start dask workers for given multinode worker: ", + "output": "start dask worker config.toml: Start dask workers for given multinode worker: Whether to start dask workers on this multinode worker. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting start_dask_worker", + "output": "start dask worker config.toml: Whether to start dask workers on this multinode worker. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting start_dask_worker", + "output": "start dask worker config.toml: Start dask workers for given multinode worker: Whether to start dask workers on this multinode worker. " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_scheduler_env do? : dask scheduler env config.toml: Set dask scheduler env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_scheduler_env. : dask scheduler env config.toml: Set dask scheduler env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Set dask scheduler env.: . : Set the dask scheduler env config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_scheduler_env", + "output": "dask scheduler env config.toml: Set dask scheduler env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_scheduler_env", + "output": "dask scheduler env config.toml: Set dask scheduler env.: Set dask scheduler env. 
See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask scheduler env", + "output": "dask scheduler env config.toml: Set dask scheduler env.: Set dask scheduler env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Set dask scheduler env.: ", + "output": "dask scheduler env config.toml: Set dask scheduler env.: Set dask scheduler env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_scheduler_env", + "output": "dask scheduler env config.toml: Set dask scheduler env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_scheduler_env", + "output": "dask scheduler env config.toml: Set dask scheduler env.: Set dask scheduler env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_cuda_scheduler_env do? : dask cuda scheduler env config.toml: Set dask scheduler env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_cuda_scheduler_env. : dask cuda scheduler env config.toml: Set dask scheduler env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Set dask cuda scheduler env.: . : Set the dask cuda scheduler env config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_scheduler_env", + "output": "dask cuda scheduler env config.toml: Set dask scheduler env. 
See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_scheduler_env", + "output": "dask cuda scheduler env config.toml: Set dask cuda scheduler env.: Set dask scheduler env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask cuda scheduler env", + "output": "dask cuda scheduler env config.toml: Set dask cuda scheduler env.: Set dask scheduler env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Set dask cuda scheduler env.: ", + "output": "dask cuda scheduler env config.toml: Set dask cuda scheduler env.: Set dask scheduler env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_cuda_scheduler_env", + "output": "dask cuda scheduler env config.toml: Set dask scheduler env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_cuda_scheduler_env", + "output": "dask cuda scheduler env config.toml: Set dask cuda scheduler env.: Set dask scheduler env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_scheduler_options do? : dask scheduler options config.toml: Set dask scheduler options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_scheduler_options. : dask scheduler options config.toml: Set dask scheduler options. 
See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Set dask scheduler command-line options.: . : Set the dask scheduler options config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_scheduler_options", + "output": "dask scheduler options config.toml: Set dask scheduler options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_scheduler_options", + "output": "dask scheduler options config.toml: Set dask scheduler command-line options.: Set dask scheduler options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask scheduler options", + "output": "dask scheduler options config.toml: Set dask scheduler command-line options.: Set dask scheduler options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Set dask scheduler command-line options.: ", + "output": "dask scheduler options config.toml: Set dask scheduler command-line options.: Set dask scheduler options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_scheduler_options", + "output": "dask scheduler options config.toml: Set dask scheduler options. 
See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_scheduler_options", + "output": "dask scheduler options config.toml: Set dask scheduler command-line options.: Set dask scheduler options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_cuda_scheduler_options do? : dask cuda scheduler options config.toml: Set dask cuda scheduler options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_cuda_scheduler_options. : dask cuda scheduler options config.toml: Set dask cuda scheduler options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Set dask cuda scheduler command-line options.: . : Set the dask cuda scheduler options config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_scheduler_options", + "output": "dask cuda scheduler options config.toml: Set dask cuda scheduler options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_scheduler_options", + "output": "dask cuda scheduler options config.toml: Set dask cuda scheduler command-line options.: Set dask cuda scheduler options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask cuda scheduler options", + "output": "dask cuda scheduler options config.toml: Set dask cuda scheduler command-line options.: Set dask cuda scheduler options. 
See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Set dask cuda scheduler command-line options.: ", + "output": "dask cuda scheduler options config.toml: Set dask cuda scheduler command-line options.: Set dask cuda scheduler options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_cuda_scheduler_options", + "output": "dask cuda scheduler options config.toml: Set dask cuda scheduler options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_cuda_scheduler_options", + "output": "dask cuda scheduler options config.toml: Set dask cuda scheduler command-line options.: Set dask cuda scheduler options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_worker_env do? : dask worker env config.toml: Set dask worker env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_worker_env. : dask worker env config.toml: Set dask worker env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Set dask worker environment variables. NCCL_SOCKET_IFNAME is automatically set, but can be overridden here.: . : Set the dask worker env config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_worker_env", + "output": "dask worker env config.toml: Set dask worker env. 
See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_worker_env", + "output": "dask worker env config.toml: Set dask worker environment variables. NCCL_SOCKET_IFNAME is automatically set, but can be overridden here.: Set dask worker env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask worker env", + "output": "dask worker env config.toml: Set dask worker environment variables. NCCL_SOCKET_IFNAME is automatically set, but can be overridden here.: Set dask worker env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Set dask worker environment variables. NCCL_SOCKET_IFNAME is automatically set, but can be overridden here.: ", + "output": "dask worker env config.toml: Set dask worker environment variables. NCCL_SOCKET_IFNAME is automatically set, but can be overridden here.: Set dask worker env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_worker_env", + "output": "dask worker env config.toml: Set dask worker env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_worker_env", + "output": "dask worker env config.toml: Set dask worker environment variables. NCCL_SOCKET_IFNAME is automatically set, but can be overridden here.: Set dask worker env. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_worker_options do? : dask worker options config.toml: Set dask worker options. 
See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_worker_options. : dask worker options config.toml: Set dask worker options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Set dask worker command-line options.: . : Set the dask worker options config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_worker_options", + "output": "dask worker options config.toml: Set dask worker options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_worker_options", + "output": "dask worker options config.toml: Set dask worker command-line options.: Set dask worker options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask worker options", + "output": "dask worker options config.toml: Set dask worker command-line options.: Set dask worker options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Set dask worker command-line options.: ", + "output": "dask worker options config.toml: Set dask worker command-line options.: Set dask worker options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_worker_options", + "output": "dask worker options config.toml: Set dask worker options. 
See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_worker_options", + "output": "dask worker options config.toml: Set dask worker command-line options.: Set dask worker options. See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_cuda_worker_options do? : dask cuda worker options config.toml: Set dask cuda worker options. Similar options as dask_cuda_cluster_kwargs. See https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately \"--rmm-pool-size 1GB\" can be set to give 1GB to RMM for more efficient rapids " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_cuda_worker_options. : dask cuda worker options config.toml: Set dask cuda worker options. Similar options as dask_cuda_cluster_kwargs. See https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately \"--rmm-pool-size 1GB\" can be set to give 1GB to RMM for more efficient rapids " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Set dask cuda worker options.: . : Set the dask cuda worker options config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_worker_options", + "output": "dask cuda worker options config.toml: Set dask cuda worker options. Similar options as dask_cuda_cluster_kwargs. 
See https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately \"--rmm-pool-size 1GB\" can be set to give 1GB to RMM for more efficient rapids " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_worker_options", + "output": "dask cuda worker options config.toml: Set dask cuda worker options.: Set dask cuda worker options. Similar options as dask_cuda_cluster_kwargs. See https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately \"--rmm-pool-size 1GB\" can be set to give 1GB to RMM for more efficient rapids " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask cuda worker options", + "output": "dask cuda worker options config.toml: Set dask cuda worker options.: Set dask cuda worker options. Similar options as dask_cuda_cluster_kwargs. See https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately \"--rmm-pool-size 1GB\" can be set to give 1GB to RMM for more efficient rapids " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Set dask cuda worker options.: ", + "output": "dask cuda worker options config.toml: Set dask cuda worker options.: Set dask cuda worker options. Similar options as dask_cuda_cluster_kwargs. See https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately \"--rmm-pool-size 1GB\" can be set to give 1GB to RMM for more efficient rapids " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_cuda_worker_options", + "output": "dask cuda worker options config.toml: Set dask cuda worker options. Similar options as dask_cuda_cluster_kwargs. 
See https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately \"--rmm-pool-size 1GB\" can be set to give 1GB to RMM for more efficient rapids " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_cuda_worker_options", + "output": "dask cuda worker options config.toml: Set dask cuda worker options.: Set dask cuda worker options. Similar options as dask_cuda_cluster_kwargs. See https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately \"--rmm-pool-size 1GB\" can be set to give 1GB to RMM for more efficient rapids " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_cuda_worker_env do? : dask cuda worker env config.toml: Set dask cuda worker env. See: https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately https://ucx-py.readthedocs.io/en/latest/dask.html " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_cuda_worker_env. : dask cuda worker env config.toml: Set dask cuda worker env. See: https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately https://ucx-py.readthedocs.io/en/latest/dask.html " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Set dask cuda worker environment variables.: . : Set the dask cuda worker env config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_worker_env", + "output": "dask cuda worker env config.toml: Set dask cuda worker env. 
See: https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately https://ucx-py.readthedocs.io/en/latest/dask.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_worker_env", + "output": "dask cuda worker env config.toml: Set dask cuda worker environment variables.: Set dask cuda worker env. See: https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately https://ucx-py.readthedocs.io/en/latest/dask.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask cuda worker env", + "output": "dask cuda worker env config.toml: Set dask cuda worker environment variables.: Set dask cuda worker env. See: https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately https://ucx-py.readthedocs.io/en/latest/dask.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Set dask cuda worker environment variables.: ", + "output": "dask cuda worker env config.toml: Set dask cuda worker environment variables.: Set dask cuda worker env. See: https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately https://ucx-py.readthedocs.io/en/latest/dask.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_cuda_worker_env", + "output": "dask cuda worker env config.toml: Set dask cuda worker env. 
See: https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately https://ucx-py.readthedocs.io/en/latest/dask.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_cuda_worker_env", + "output": "dask cuda worker env config.toml: Set dask cuda worker environment variables.: Set dask cuda worker env. See: https://dask-cuda.readthedocs.io/en/latest/ucx.html#launching-scheduler-workers-and-clients-separately https://ucx-py.readthedocs.io/en/latest/dask.html " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_protocol do? : dask protocol config.toml: See https://docs.dask.org/en/latest/setup/cli.html e.g. ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_protocol. : dask protocol config.toml: See https://docs.dask.org/en/latest/setup/cli.html e.g. ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Protocol using for dask communications.: . : Set the dask protocol config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_protocol", + "output": "dask protocol config.toml: See https://docs.dask.org/en/latest/setup/cli.html e.g. ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_protocol", + "output": "dask protocol config.toml: Protocol using for dask communications.: See https://docs.dask.org/en/latest/setup/cli.html e.g. 
ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask protocol", + "output": "dask protocol config.toml: Protocol using for dask communications.: See https://docs.dask.org/en/latest/setup/cli.html e.g. ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Protocol using for dask communications.: ", + "output": "dask protocol config.toml: Protocol using for dask communications.: See https://docs.dask.org/en/latest/setup/cli.html e.g. ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_protocol", + "output": "dask protocol config.toml: See https://docs.dask.org/en/latest/setup/cli.html e.g. ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_protocol", + "output": "dask protocol config.toml: Protocol using for dask communications.: See https://docs.dask.org/en/latest/setup/cli.html e.g. ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_server_port do? : dask server port config.toml: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_server_port. : dask server port config.toml: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Port using by server for dask communications.: . 
: Set the dask server port config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_server_port", + "output": "dask server port config.toml: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_server_port", + "output": "dask server port config.toml: Port using by server for dask communications.: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask server port", + "output": "dask server port config.toml: Port using by server for dask communications.: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Port using by server for dask communications.: ", + "output": "dask server port config.toml: Port using by server for dask communications.: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_server_port", + "output": "dask server port config.toml: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_server_port", + "output": "dask server port config.toml: Port using by server for dask communications.: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_dashboard_port do? : dask dashboard port config.toml: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_dashboard_port. 
: dask dashboard port config.toml: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Dask dashboard port for dask diagnostics.: . : Set the dask dashboard port config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_dashboard_port", + "output": "dask dashboard port config.toml: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_dashboard_port", + "output": "dask dashboard port config.toml: Dask dashboard port for dask diagnostics.: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask dashboard port", + "output": "dask dashboard port config.toml: Dask dashboard port for dask diagnostics.: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Dask dashboard port for dask diagnostics.: ", + "output": "dask dashboard port config.toml: Dask dashboard port for dask diagnostics.: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_dashboard_port", + "output": "dask dashboard port config.toml: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_dashboard_port", + "output": "dask dashboard port config.toml: Dask dashboard port for dask diagnostics.: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_cuda_protocol do? 
: dask cuda protocol config.toml: See https://docs.dask.org/en/latest/setup/cli.html e.g. ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_cuda_protocol. : dask cuda protocol config.toml: See https://docs.dask.org/en/latest/setup/cli.html e.g. ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Protocol using for dask cuda communications.: . : Set the dask cuda protocol config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_protocol", + "output": "dask cuda protocol config.toml: See https://docs.dask.org/en/latest/setup/cli.html e.g. ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_protocol", + "output": "dask cuda protocol config.toml: Protocol using for dask cuda communications.: See https://docs.dask.org/en/latest/setup/cli.html e.g. ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask cuda protocol", + "output": "dask cuda protocol config.toml: Protocol using for dask cuda communications.: See https://docs.dask.org/en/latest/setup/cli.html e.g. ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Protocol using for dask cuda communications.: ", + "output": "dask cuda protocol config.toml: Protocol using for dask cuda communications.: See https://docs.dask.org/en/latest/setup/cli.html e.g. 
ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_cuda_protocol", + "output": "dask cuda protocol config.toml: See https://docs.dask.org/en/latest/setup/cli.html e.g. ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_cuda_protocol", + "output": "dask cuda protocol config.toml: Protocol using for dask cuda communications.: See https://docs.dask.org/en/latest/setup/cli.html e.g. ucx is optimal, while tcp is most reliable " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_cuda_server_port do? : dask cuda server port config.toml: See https://docs.dask.org/en/latest/setup/cli.html port + 1 is used for dask dashboard " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_cuda_server_port. : dask cuda server port config.toml: See https://docs.dask.org/en/latest/setup/cli.html port + 1 is used for dask dashboard " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Port using by server for dask cuda communications.: . 
: Set the dask cuda server port config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_server_port", + "output": "dask cuda server port config.toml: See https://docs.dask.org/en/latest/setup/cli.html port + 1 is used for dask dashboard " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_server_port", + "output": "dask cuda server port config.toml: Port using by server for dask cuda communications.: See https://docs.dask.org/en/latest/setup/cli.html port + 1 is used for dask dashboard " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask cuda server port", + "output": "dask cuda server port config.toml: Port using by server for dask cuda communications.: See https://docs.dask.org/en/latest/setup/cli.html port + 1 is used for dask dashboard " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Port using by server for dask cuda communications.: ", + "output": "dask cuda server port config.toml: Port using by server for dask cuda communications.: See https://docs.dask.org/en/latest/setup/cli.html port + 1 is used for dask dashboard " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_cuda_server_port", + "output": "dask cuda server port config.toml: See https://docs.dask.org/en/latest/setup/cli.html port + 1 is used for dask dashboard " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_cuda_server_port", + "output": "dask cuda server port config.toml: Port using by server for dask cuda communications.: See https://docs.dask.org/en/latest/setup/cli.html port + 1 is used for dask dashboard " + }, + { + "prompt_type": 
"plain", + "instruction": ": What does dask_cuda_dashboard_port do? : dask cuda dashboard port config.toml: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_cuda_dashboard_port. : dask cuda dashboard port config.toml: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Dask dashboard port for dask_cuda diagnostics.: . : Set the dask cuda dashboard port config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_dashboard_port", + "output": "dask cuda dashboard port config.toml: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_dashboard_port", + "output": "dask cuda dashboard port config.toml: Dask dashboard port for dask_cuda diagnostics.: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask cuda dashboard port", + "output": "dask cuda dashboard port config.toml: Dask dashboard port for dask_cuda diagnostics.: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Dask dashboard port for dask_cuda diagnostics.: ", + "output": "dask cuda dashboard port config.toml: Dask dashboard port for dask_cuda diagnostics.: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_cuda_dashboard_port", + "output": "dask cuda dashboard port config.toml: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "human_bot", + 
"instruction": "Provide a detailed explanation of the expert setting dask_cuda_dashboard_port", + "output": "dask cuda dashboard port config.toml: Dask dashboard port for dask_cuda diagnostics.: See https://docs.dask.org/en/latest/setup/cli.html " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_server_ip do? : dask server ip config.toml: If empty string, auto-detect IP capable of reaching network. Required to be set if using worker_mode=multinode. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_server_ip. : dask server ip config.toml: If empty string, auto-detect IP capable of reaching network. Required to be set if using worker_mode=multinode. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: IP address using by server for dask and dask cuda communications.: . : Set the dask server ip config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_server_ip", + "output": "dask server ip config.toml: If empty string, auto-detect IP capable of reaching network. Required to be set if using worker_mode=multinode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_server_ip", + "output": "dask server ip config.toml: IP address using by server for dask and dask cuda communications.: If empty string, auto-detect IP capable of reaching network. Required to be set if using worker_mode=multinode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask server ip", + "output": "dask server ip config.toml: IP address using by server for dask and dask cuda communications.: If empty string, auto-detect IP capable of reaching network. Required to be set if using worker_mode=multinode. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "IP address using by server for dask and dask cuda communications.: ", + "output": "dask server ip config.toml: IP address using by server for dask and dask cuda communications.: If empty string, auto-detect IP capable of reaching network. Required to be set if using worker_mode=multinode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_server_ip", + "output": "dask server ip config.toml: If empty string, auto-detect IP capable of reaching network. Required to be set if using worker_mode=multinode. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_server_ip", + "output": "dask server ip config.toml: IP address using by server for dask and dask cuda communications.: If empty string, auto-detect IP capable of reaching network. Required to be set if using worker_mode=multinode. " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_worker_nprocs do? : dask worker nprocs config.toml: Number of processses per dask (not cuda-GPU) worker. If -1, uses dask default of cpu count + 1 + nprocs. If -2, uses DAI default of total number of physical cores. Recommended for heavy feature engineering. If 1, assumes tasks are mostly multi-threaded and can use entire node per task. Recommended for heavy multinode model training. Only applicable to dask (not dask_cuda) workers " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_worker_nprocs. : dask worker nprocs config.toml: Number of processses per dask (not cuda-GPU) worker. If -1, uses dask default of cpu count + 1 + nprocs. If -2, uses DAI default of total number of physical cores. Recommended for heavy feature engineering. If 1, assumes tasks are mostly multi-threaded and can use entire node per task. 
Recommended for heavy multinode model training. Only applicable to dask (not dask_cuda) workers " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of processes per dask worker.: . : Set the dask worker nprocs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_worker_nprocs", + "output": "dask worker nprocs config.toml: Number of processses per dask (not cuda-GPU) worker. If -1, uses dask default of cpu count + 1 + nprocs. If -2, uses DAI default of total number of physical cores. Recommended for heavy feature engineering. If 1, assumes tasks are mostly multi-threaded and can use entire node per task. Recommended for heavy multinode model training. Only applicable to dask (not dask_cuda) workers " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_worker_nprocs", + "output": "dask worker nprocs config.toml: Number of processes per dask worker.: Number of processses per dask (not cuda-GPU) worker. If -1, uses dask default of cpu count + 1 + nprocs. If -2, uses DAI default of total number of physical cores. Recommended for heavy feature engineering. If 1, assumes tasks are mostly multi-threaded and can use entire node per task. Recommended for heavy multinode model training. Only applicable to dask (not dask_cuda) workers " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask worker nprocs", + "output": "dask worker nprocs config.toml: Number of processes per dask worker.: Number of processses per dask (not cuda-GPU) worker. If -1, uses dask default of cpu count + 1 + nprocs. If -2, uses DAI default of total number of physical cores. Recommended for heavy feature engineering. If 1, assumes tasks are mostly multi-threaded and can use entire node per task. 
Recommended for heavy multinode model training. Only applicable to dask (not dask_cuda) workers " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of processes per dask worker.: ", + "output": "dask worker nprocs config.toml: Number of processes per dask worker.: Number of processses per dask (not cuda-GPU) worker. If -1, uses dask default of cpu count + 1 + nprocs. If -2, uses DAI default of total number of physical cores. Recommended for heavy feature engineering. If 1, assumes tasks are mostly multi-threaded and can use entire node per task. Recommended for heavy multinode model training. Only applicable to dask (not dask_cuda) workers " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_worker_nprocs", + "output": "dask worker nprocs config.toml: Number of processses per dask (not cuda-GPU) worker. If -1, uses dask default of cpu count + 1 + nprocs. If -2, uses DAI default of total number of physical cores. Recommended for heavy feature engineering. If 1, assumes tasks are mostly multi-threaded and can use entire node per task. Recommended for heavy multinode model training. Only applicable to dask (not dask_cuda) workers " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_worker_nprocs", + "output": "dask worker nprocs config.toml: Number of processes per dask worker.: Number of processses per dask (not cuda-GPU) worker. If -1, uses dask default of cpu count + 1 + nprocs. If -2, uses DAI default of total number of physical cores. Recommended for heavy feature engineering. If 1, assumes tasks are mostly multi-threaded and can use entire node per task. Recommended for heavy multinode model training. Only applicable to dask (not dask_cuda) workers " + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_worker_nthreads do? 
: dask worker nthreads config.toml: Number of threads per process for dask workers" + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_worker_nthreads. : dask worker nthreads config.toml: Number of threads per process for dask workers" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of threads per process for dask.: . : Set the dask worker nthreads config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_worker_nthreads", + "output": "dask worker nthreads config.toml: Number of threads per process for dask workers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_worker_nthreads", + "output": "dask worker nthreads config.toml: Number of threads per process for dask.: Number of threads per process for dask workers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask worker nthreads", + "output": "dask worker nthreads config.toml: Number of threads per process for dask.: Number of threads per process for dask workers" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of threads per process for dask.: ", + "output": "dask worker nthreads config.toml: Number of threads per process for dask.: Number of threads per process for dask workers" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_worker_nthreads", + "output": "dask worker nthreads config.toml: Number of threads per process for dask workers" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_worker_nthreads", + "output": "dask worker nthreads config.toml: Number of threads per process for dask.: Number of threads 
per process for dask workers" + }, + { + "prompt_type": "plain", + "instruction": ": What does dask_cuda_worker_nthreads do? : dask cuda worker nthreads config.toml: Number of threads per process for dask_cuda workers If -2, uses DAI default of physical cores per GPU, since must have 1 worker/GPU only. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dask_cuda_worker_nthreads. : dask cuda worker nthreads config.toml: Number of threads per process for dask_cuda workers If -2, uses DAI default of physical cores per GPU, since must have 1 worker/GPU only. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of threads per process for dask_cuda.: . : Set the dask cuda worker nthreads config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_worker_nthreads", + "output": "dask cuda worker nthreads config.toml: Number of threads per process for dask_cuda workers If -2, uses DAI default of physical cores per GPU, since must have 1 worker/GPU only. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask_cuda_worker_nthreads", + "output": "dask cuda worker nthreads config.toml: Number of threads per process for dask_cuda.: Number of threads per process for dask_cuda workers If -2, uses DAI default of physical cores per GPU, since must have 1 worker/GPU only. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dask cuda worker nthreads", + "output": "dask cuda worker nthreads config.toml: Number of threads per process for dask_cuda.: Number of threads per process for dask_cuda workers If -2, uses DAI default of physical cores per GPU, since must have 1 worker/GPU only. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of threads per process for dask_cuda.: ", + "output": "dask cuda worker nthreads config.toml: Number of threads per process for dask_cuda.: Number of threads per process for dask_cuda workers If -2, uses DAI default of physical cores per GPU, since must have 1 worker/GPU only. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dask_cuda_worker_nthreads", + "output": "dask cuda worker nthreads config.toml: Number of threads per process for dask_cuda workers If -2, uses DAI default of physical cores per GPU, since must have 1 worker/GPU only. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dask_cuda_worker_nthreads", + "output": "dask cuda worker nthreads config.toml: Number of threads per process for dask_cuda.: Number of threads per process for dask_cuda workers If -2, uses DAI default of physical cores per GPU, since must have 1 worker/GPU only. " + }, + { + "prompt_type": "plain", + "instruction": ": What does lightgbm_listen_port do? : lightgbm listen port config.toml: See https://github.com/dask/dask-lightgbm " + }, + { + "prompt_type": "plain", + "instruction": ": Explain lightgbm_listen_port. : lightgbm listen port config.toml: See https://github.com/dask/dask-lightgbm " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: LightGBM local listen port when using dask with lightgbm: . 
: Set the lightgbm listen port config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_listen_port", + "output": "lightgbm listen port config.toml: See https://github.com/dask/dask-lightgbm " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm_listen_port", + "output": "lightgbm listen port config.toml: LightGBM local listen port when using dask with lightgbm: See https://github.com/dask/dask-lightgbm " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lightgbm listen port", + "output": "lightgbm listen port config.toml: LightGBM local listen port when using dask with lightgbm: See https://github.com/dask/dask-lightgbm " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "LightGBM local listen port when using dask with lightgbm: ", + "output": "lightgbm listen port config.toml: LightGBM local listen port when using dask with lightgbm: See https://github.com/dask/dask-lightgbm " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting lightgbm_listen_port", + "output": "lightgbm listen port config.toml: See https://github.com/dask/dask-lightgbm " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting lightgbm_listen_port", + "output": "lightgbm listen port config.toml: LightGBM local listen port when using dask with lightgbm: See https://github.com/dask/dask-lightgbm " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_jupyter_server do? : enable jupyter server config.toml: Whether to enable jupyter server" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_jupyter_server. 
: enable jupyter server config.toml: Whether to enable jupyter server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_jupyter_server", + "output": "enable jupyter server config.toml: Whether to enable jupyter server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_jupyter_server", + "output": "enable jupyter server config.toml: Whether to enable jupyter server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable jupyter server", + "output": "enable jupyter server config.toml: Whether to enable jupyter server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable jupyter server config.toml: Whether to enable jupyter server" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_jupyter_server", + "output": "enable jupyter server config.toml: Whether to enable jupyter server" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_jupyter_server", + "output": "enable jupyter server config.toml: Whether to enable jupyter server" + }, + { + "prompt_type": "plain", + "instruction": ": What does jupyter_server_port do? : jupyter server port config.toml: Port for jupyter server" + }, + { + "prompt_type": "plain", + "instruction": ": Explain jupyter_server_port. 
: jupyter server port config.toml: Port for jupyter server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "jupyter_server_port", + "output": "jupyter server port config.toml: Port for jupyter server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "jupyter_server_port", + "output": "jupyter server port config.toml: Port for jupyter server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "jupyter server port", + "output": "jupyter server port config.toml: Port for jupyter server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "jupyter server port config.toml: Port for jupyter server" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting jupyter_server_port", + "output": "jupyter server port config.toml: Port for jupyter server" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting jupyter_server_port", + "output": "jupyter server port config.toml: Port for jupyter server" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_jupyter_server_browser do? : enable jupyter server browser config.toml: Whether to enable jupyter server browser" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_jupyter_server_browser. 
: enable jupyter server browser config.toml: Whether to enable jupyter server browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_jupyter_server_browser", + "output": "enable jupyter server browser config.toml: Whether to enable jupyter server browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_jupyter_server_browser", + "output": "enable jupyter server browser config.toml: Whether to enable jupyter server browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable jupyter server browser", + "output": "enable jupyter server browser config.toml: Whether to enable jupyter server browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable jupyter server browser config.toml: Whether to enable jupyter server browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_jupyter_server_browser", + "output": "enable jupyter server browser config.toml: Whether to enable jupyter server browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_jupyter_server_browser", + "output": "enable jupyter server browser config.toml: Whether to enable jupyter server browser" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_jupyter_server_browser_root do? : enable jupyter server browser root config.toml: Whether to root access to jupyter server browser" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_jupyter_server_browser_root. 
: enable jupyter server browser root config.toml: Whether to root access to jupyter server browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_jupyter_server_browser_root", + "output": "enable jupyter server browser root config.toml: Whether to root access to jupyter server browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_jupyter_server_browser_root", + "output": "enable jupyter server browser root config.toml: Whether to root access to jupyter server browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable jupyter server browser root", + "output": "enable jupyter server browser root config.toml: Whether to root access to jupyter server browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable jupyter server browser root config.toml: Whether to root access to jupyter server browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_jupyter_server_browser_root", + "output": "enable jupyter server browser root config.toml: Whether to root access to jupyter server browser" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_jupyter_server_browser_root", + "output": "enable jupyter server browser root config.toml: Whether to root access to jupyter server browser" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_triton_server_local do? : enable triton server local config.toml: Whether to enable built-in Triton inference server. If false, can still connect to remote Triton inference server by setting triton_host. 
If true, will start built-in Triton inference server." + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_triton_server_local. : enable triton server local config.toml: Whether to enable built-in Triton inference server. If false, can still connect to remote Triton inference server by setting triton_host. If true, will start built-in Triton inference server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_triton_server_local", + "output": "enable triton server local config.toml: Whether to enable built-in Triton inference server. If false, can still connect to remote Triton inference server by setting triton_host. If true, will start built-in Triton inference server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_triton_server_local", + "output": "enable triton server local config.toml: Whether to enable built-in Triton inference server. If false, can still connect to remote Triton inference server by setting triton_host. If true, will start built-in Triton inference server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable triton server local", + "output": "enable triton server local config.toml: Whether to enable built-in Triton inference server. If false, can still connect to remote Triton inference server by setting triton_host. If true, will start built-in Triton inference server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable triton server local config.toml: Whether to enable built-in Triton inference server. If false, can still connect to remote Triton inference server by setting triton_host. If true, will start built-in Triton inference server." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_triton_server_local", + "output": "enable triton server local config.toml: Whether to enable built-in Triton inference server. If false, can still connect to remote Triton inference server by setting triton_host. If true, will start built-in Triton inference server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_triton_server_local", + "output": "enable triton server local config.toml: Whether to enable built-in Triton inference server. If false, can still connect to remote Triton inference server by setting triton_host. If true, will start built-in Triton inference server." + }, + { + "prompt_type": "plain", + "instruction": ": What does triton_host_local do? : triton host local config.toml: Hostname (or IP address) of built-in Triton inference service, to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. Only needed if enable_triton_server_local is disabled. Required to be set for some systems, like AWS, for networking packages to reach the server. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain triton_host_local. : triton host local config.toml: Hostname (or IP address) of built-in Triton inference service, to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. Only needed if enable_triton_server_local is disabled. Required to be set for some systems, like AWS, for networking packages to reach the server. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Hostname of built-in Triton inference server.: . 
: Set the triton host local config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_host_local", + "output": "triton host local config.toml: Hostname (or IP address) of built-in Triton inference service, to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. Only needed if enable_triton_server_local is disabled. Required to be set for some systems, like AWS, for networking packages to reach the server. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_host_local", + "output": "triton host local config.toml: Hostname of built-in Triton inference server.: Hostname (or IP address) of built-in Triton inference service, to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. Only needed if enable_triton_server_local is disabled. Required to be set for some systems, like AWS, for networking packages to reach the server. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton host local", + "output": "triton host local config.toml: Hostname of built-in Triton inference server.: Hostname (or IP address) of built-in Triton inference service, to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. Only needed if enable_triton_server_local is disabled. Required to be set for some systems, like AWS, for networking packages to reach the server. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Hostname of built-in Triton inference server.: ", + "output": "triton host local config.toml: Hostname of built-in Triton inference server.: Hostname (or IP address) of built-in Triton inference service, to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. Only needed if enable_triton_server_local is disabled. Required to be set for some systems, like AWS, for networking packages to reach the server. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting triton_host_local", + "output": "triton host local config.toml: Hostname (or IP address) of built-in Triton inference service, to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. Only needed if enable_triton_server_local is disabled. Required to be set for some systems, like AWS, for networking packages to reach the server. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting triton_host_local", + "output": "triton host local config.toml: Hostname of built-in Triton inference server.: Hostname (or IP address) of built-in Triton inference service, to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. Only needed if enable_triton_server_local is disabled. Required to be set for some systems, like AWS, for networking packages to reach the server. " + }, + { + "prompt_type": "plain", + "instruction": ": What does triton_server_params_local do? : triton server params local config.toml: Set Triton server command line arguments passed with --key=value." + }, + { + "prompt_type": "plain", + "instruction": ": Explain triton_server_params_local. 
: triton server params local config.toml: Set Triton server command line arguments passed with --key=value." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Built-in Triton server command line arguments.: . : Set the triton server params local config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_server_params_local", + "output": "triton server params local config.toml: Set Triton server command line arguments passed with --key=value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_server_params_local", + "output": "triton server params local config.toml: Built-in Triton server command line arguments.: Set Triton server command line arguments passed with --key=value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton server params local", + "output": "triton server params local config.toml: Built-in Triton server command line arguments.: Set Triton server command line arguments passed with --key=value." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Built-in Triton server command line arguments.: ", + "output": "triton server params local config.toml: Built-in Triton server command line arguments.: Set Triton server command line arguments passed with --key=value." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting triton_server_params_local", + "output": "triton server params local config.toml: Set Triton server command line arguments passed with --key=value." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting triton_server_params_local", + "output": "triton server params local config.toml: Built-in Triton server command line arguments.: Set Triton server command line arguments passed with --key=value." + }, + { + "prompt_type": "plain", + "instruction": ": What does triton_model_repository_dir_local do? : triton model repository dir local config.toml: Path to model repository (relative to data_directory) for local Triton inference server built-in to Driverless AI. All Triton deployments for all users are stored in this directory." + }, + { + "prompt_type": "plain", + "instruction": ": Explain triton_model_repository_dir_local. : triton model repository dir local config.toml: Path to model repository (relative to data_directory) for local Triton inference server built-in to Driverless AI. All Triton deployments for all users are stored in this directory." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Path to Triton model repository.: . : Set the triton model repository dir local config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_model_repository_dir_local", + "output": "triton model repository dir local config.toml: Path to model repository (relative to data_directory) for local Triton inference server built-in to Driverless AI. All Triton deployments for all users are stored in this directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_model_repository_dir_local", + "output": "triton model repository dir local config.toml: Path to Triton model repository.: Path to model repository (relative to data_directory) for local Triton inference server built-in to Driverless AI. All Triton deployments for all users are stored in this directory." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton model repository dir local", + "output": "triton model repository dir local config.toml: Path to Triton model repository.: Path to model repository (relative to data_directory) for local Triton inference server built-in to Driverless AI. All Triton deployments for all users are stored in this directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Path to Triton model repository.: ", + "output": "triton model repository dir local config.toml: Path to Triton model repository.: Path to model repository (relative to data_directory) for local Triton inference server built-in to Driverless AI. All Triton deployments for all users are stored in this directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting triton_model_repository_dir_local", + "output": "triton model repository dir local config.toml: Path to model repository (relative to data_directory) for local Triton inference server built-in to Driverless AI. All Triton deployments for all users are stored in this directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting triton_model_repository_dir_local", + "output": "triton model repository dir local config.toml: Path to Triton model repository.: Path to model repository (relative to data_directory) for local Triton inference server built-in to Driverless AI. All Triton deployments for all users are stored in this directory." + }, + { + "prompt_type": "plain", + "instruction": ": What does triton_server_core_chunk_size_local do? : triton server core chunk size local config.toml: Number of cores to specify as resource, so that C++ MOJO can use its own multi-threaded parallel row batching to save memory and increase performance. 
A value of 1 is most portable across any Triton server, and is the most efficient use of resources for small (e.g. 1) batch sizes, while 4 is reasonable default assuming requests are batched." + }, + { + "prompt_type": "plain", + "instruction": ": Explain triton_server_core_chunk_size_local. : triton server core chunk size local config.toml: Number of cores to specify as resource, so that C++ MOJO can use its own multi-threaded parallel row batching to save memory and increase performance. A value of 1 is most portable across any Triton server, and is the most efficient use of resources for small (e.g. 1) batch sizes, while 4 is reasonable default assuming requests are batched." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of cores to use for each model.: . : Set the triton server core chunk size local config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_server_core_chunk_size_local", + "output": "triton server core chunk size local config.toml: Number of cores to specify as resource, so that C++ MOJO can use its own multi-threaded parallel row batching to save memory and increase performance. A value of 1 is most portable across any Triton server, and is the most efficient use of resources for small (e.g. 1) batch sizes, while 4 is reasonable default assuming requests are batched." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_server_core_chunk_size_local", + "output": "triton server core chunk size local config.toml: Number of cores to use for each model.: Number of cores to specify as resource, so that C++ MOJO can use its own multi-threaded parallel row batching to save memory and increase performance. A value of 1 is most portable across any Triton server, and is the most efficient use of resources for small (e.g. 
1) batch sizes, while 4 is reasonable default assuming requests are batched." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton server core chunk size local", + "output": "triton server core chunk size local config.toml: Number of cores to use for each model.: Number of cores to specify as resource, so that C++ MOJO can use its own multi-threaded parallel row batching to save memory and increase performance. A value of 1 is most portable across any Triton server, and is the most efficient use of resources for small (e.g. 1) batch sizes, while 4 is reasonable default assuming requests are batched." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of cores to use for each model.: ", + "output": "triton server core chunk size local config.toml: Number of cores to use for each model.: Number of cores to specify as resource, so that C++ MOJO can use its own multi-threaded parallel row batching to save memory and increase performance. A value of 1 is most portable across any Triton server, and is the most efficient use of resources for small (e.g. 1) batch sizes, while 4 is reasonable default assuming requests are batched." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting triton_server_core_chunk_size_local", + "output": "triton server core chunk size local config.toml: Number of cores to specify as resource, so that C++ MOJO can use its own multi-threaded parallel row batching to save memory and increase performance. A value of 1 is most portable across any Triton server, and is the most efficient use of resources for small (e.g. 1) batch sizes, while 4 is reasonable default assuming requests are batched." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting triton_server_core_chunk_size_local", + "output": "triton server core chunk size local config.toml: Number of cores to use for each model.: Number of cores to specify as resource, so that C++ MOJO can use its own multi-threaded parallel row batching to save memory and increase performance. A value of 1 is most portable across any Triton server, and is the most efficient use of resources for small (e.g. 1) batch sizes, while 4 is reasonable default assuming requests are batched." + }, + { + "prompt_type": "plain", + "instruction": ": What does triton_host_remote do? : triton host remote config.toml: Hostname (or IP address) of remote Triton inference service (outside of DAI), to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. If set, check triton_model_repository_dir_remote and triton_server_params_remote as well. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain triton_host_remote. : triton host remote config.toml: Hostname (or IP address) of remote Triton inference service (outside of DAI), to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. If set, check triton_model_repository_dir_remote and triton_server_params_remote as well. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Hostname of remote Triton inference server.: . : Set the triton host remote config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_host_remote", + "output": "triton host remote config.toml: Hostname (or IP address) of remote Triton inference service (outside of DAI), to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. 
If set, check triton_model_repository_dir_remote and triton_server_params_remote as well. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_host_remote", + "output": "triton host remote config.toml: Hostname of remote Triton inference server.: Hostname (or IP address) of remote Triton inference service (outside of DAI), to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. If set, check triton_model_repository_dir_remote and triton_server_params_remote as well. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton host remote", + "output": "triton host remote config.toml: Hostname of remote Triton inference server.: Hostname (or IP address) of remote Triton inference service (outside of DAI), to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. If set, check triton_model_repository_dir_remote and triton_server_params_remote as well. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Hostname of remote Triton inference server.: ", + "output": "triton host remote config.toml: Hostname of remote Triton inference server.: Hostname (or IP address) of remote Triton inference service (outside of DAI), to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. If set, check triton_model_repository_dir_remote and triton_server_params_remote as well. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting triton_host_remote", + "output": "triton host remote config.toml: Hostname (or IP address) of remote Triton inference service (outside of DAI), to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. If set, check triton_model_repository_dir_remote and triton_server_params_remote as well. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting triton_host_remote", + "output": "triton host remote config.toml: Hostname of remote Triton inference server.: Hostname (or IP address) of remote Triton inference service (outside of DAI), to be used when auto_deploy_triton_scoring_pipeline and make_triton_scoring_pipeline are not disabled. If set, check triton_model_repository_dir_remote and triton_server_params_remote as well. " + }, + { + "prompt_type": "plain", + "instruction": ": What does triton_model_repository_dir_remote do? : triton model repository dir remote config.toml: Path to model repository directory for remote Triton inference server outside of Driverless AI. All Triton deployments for all users are stored in this directory. Requires write access to this directory from Driverless AI (shared file system). This setting is optional. If not provided, will upload each model deployment over gRPC protocol." + }, + { + "prompt_type": "plain", + "instruction": ": Explain triton_model_repository_dir_remote. : triton model repository dir remote config.toml: Path to model repository directory for remote Triton inference server outside of Driverless AI. All Triton deployments for all users are stored in this directory. Requires write access to this directory from Driverless AI (shared file system). This setting is optional. If not provided, will upload each model deployment over gRPC protocol." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_model_repository_dir_remote", + "output": "triton model repository dir remote config.toml: Path to model repository directory for remote Triton inference server outside of Driverless AI. All Triton deployments for all users are stored in this directory. Requires write access to this directory from Driverless AI (shared file system). This setting is optional. If not provided, will upload each model deployment over gRPC protocol." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_model_repository_dir_remote", + "output": "triton model repository dir remote config.toml: Path to model repository directory for remote Triton inference server outside of Driverless AI. All Triton deployments for all users are stored in this directory. Requires write access to this directory from Driverless AI (shared file system). This setting is optional. If not provided, will upload each model deployment over gRPC protocol." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton model repository dir remote", + "output": "triton model repository dir remote config.toml: Path to model repository directory for remote Triton inference server outside of Driverless AI. All Triton deployments for all users are stored in this directory. Requires write access to this directory from Driverless AI (shared file system). This setting is optional. If not provided, will upload each model deployment over gRPC protocol." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "triton model repository dir remote config.toml: Path to model repository directory for remote Triton inference server outside of Driverless AI. 
All Triton deployments for all users are stored in this directory. Requires write access to this directory from Driverless AI (shared file system). This setting is optional. If not provided, will upload each model deployment over gRPC protocol." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting triton_model_repository_dir_remote", + "output": "triton model repository dir remote config.toml: Path to model repository directory for remote Triton inference server outside of Driverless AI. All Triton deployments for all users are stored in this directory. Requires write access to this directory from Driverless AI (shared file system). This setting is optional. If not provided, will upload each model deployment over gRPC protocol." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting triton_model_repository_dir_remote", + "output": "triton model repository dir remote config.toml: Path to model repository directory for remote Triton inference server outside of Driverless AI. All Triton deployments for all users are stored in this directory. Requires write access to this directory from Driverless AI (shared file system). This setting is optional. If not provided, will upload each model deployment over gRPC protocol." + }, + { + "prompt_type": "plain", + "instruction": ": What does triton_server_params_remote do? : triton server params remote config.toml: Parameters to connect to remote Triton server, only used if triton_host_remote and triton_model_repository_dir_remote are set. ." + }, + { + "prompt_type": "plain", + "instruction": ": Explain triton_server_params_remote. : triton server params remote config.toml: Parameters to connect to remote Triton server, only used if triton_host_remote and triton_model_repository_dir_remote are set. ." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Remote Triton server parameters, used to connect via tritonclient: . : Set the triton server params remote config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_server_params_remote", + "output": "triton server params remote config.toml: Parameters to connect to remote Triton server, only used if triton_host_remote and triton_model_repository_dir_remote are set. ." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton_server_params_remote", + "output": "triton server params remote config.toml: Remote Triton server parameters, used to connect via tritonclient: Parameters to connect to remote Triton server, only used if triton_host_remote and triton_model_repository_dir_remote are set. ." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "triton server params remote", + "output": "triton server params remote config.toml: Remote Triton server parameters, used to connect via tritonclient: Parameters to connect to remote Triton server, only used if triton_host_remote and triton_model_repository_dir_remote are set. ." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Remote Triton server parameters, used to connect via tritonclient: ", + "output": "triton server params remote config.toml: Remote Triton server parameters, used to connect via tritonclient: Parameters to connect to remote Triton server, only used if triton_host_remote and triton_model_repository_dir_remote are set. ." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting triton_server_params_remote", + "output": "triton server params remote config.toml: Parameters to connect to remote Triton server, only used if triton_host_remote and triton_model_repository_dir_remote are set. ." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting triton_server_params_remote", + "output": "triton server params remote config.toml: Remote Triton server parameters, used to connect via tritonclient: Parameters to connect to remote Triton server, only used if triton_host_remote and triton_model_repository_dir_remote are set. ." + }, + { + "prompt_type": "plain", + "instruction": ": What does multinode_enable_strict_queue_policy do? : multinode enable strict queue policy config.toml: When set to true, CPU executors will strictly run just CPU tasks." + }, + { + "prompt_type": "plain", + "instruction": ": Explain multinode_enable_strict_queue_policy. : multinode enable strict queue policy config.toml: When set to true, CPU executors will strictly run just CPU tasks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode_enable_strict_queue_policy", + "output": "multinode enable strict queue policy config.toml: When set to true, CPU executors will strictly run just CPU tasks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode_enable_strict_queue_policy", + "output": "multinode enable strict queue policy config.toml: When set to true, CPU executors will strictly run just CPU tasks." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode enable strict queue policy", + "output": "multinode enable strict queue policy config.toml: When set to true, CPU executors will strictly run just CPU tasks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "multinode enable strict queue policy config.toml: When set to true, CPU executors will strictly run just CPU tasks." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting multinode_enable_strict_queue_policy", + "output": "multinode enable strict queue policy config.toml: When set to true, CPU executors will strictly run just CPU tasks." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting multinode_enable_strict_queue_policy", + "output": "multinode enable strict queue policy config.toml: When set to true, CPU executors will strictly run just CPU tasks." + }, + { + "prompt_type": "plain", + "instruction": ": What does multinode_enable_cpu_tasks_on_gpu_machines do? : multinode enable cpu tasks on gpu machines config.toml: Controls whether CPU tasks can run on GPU machines." + }, + { + "prompt_type": "plain", + "instruction": ": Explain multinode_enable_cpu_tasks_on_gpu_machines. : multinode enable cpu tasks on gpu machines config.toml: Controls whether CPU tasks can run on GPU machines." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode_enable_cpu_tasks_on_gpu_machines", + "output": "multinode enable cpu tasks on gpu machines config.toml: Controls whether CPU tasks can run on GPU machines." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode_enable_cpu_tasks_on_gpu_machines", + "output": "multinode enable cpu tasks on gpu machines config.toml: Controls whether CPU tasks can run on GPU machines." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode enable cpu tasks on gpu machines", + "output": "multinode enable cpu tasks on gpu machines config.toml: Controls whether CPU tasks can run on GPU machines." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "multinode enable cpu tasks on gpu machines config.toml: Controls whether CPU tasks can run on GPU machines." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting multinode_enable_cpu_tasks_on_gpu_machines", + "output": "multinode enable cpu tasks on gpu machines config.toml: Controls whether CPU tasks can run on GPU machines." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting multinode_enable_cpu_tasks_on_gpu_machines", + "output": "multinode enable cpu tasks on gpu machines config.toml: Controls whether CPU tasks can run on GPU machines." + }, + { + "prompt_type": "plain", + "instruction": ": What does multinode_storage_medium do? : multinode storage medium config.toml: Storage medium to be used to exchange data between main server and remote worker nodes." + }, + { + "prompt_type": "plain", + "instruction": ": Explain multinode_storage_medium. : multinode storage medium config.toml: Storage medium to be used to exchange data between main server and remote worker nodes." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode_storage_medium", + "output": "multinode storage medium config.toml: Storage medium to be used to exchange data between main server and remote worker nodes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode_storage_medium", + "output": "multinode storage medium config.toml: Storage medium to be used to exchange data between main server and remote worker nodes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode storage medium", + "output": "multinode storage medium config.toml: Storage medium to be used to exchange data between main server and remote worker nodes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "multinode storage medium config.toml: Storage medium to be used to exchange data between main server and remote worker nodes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting multinode_storage_medium", + "output": "multinode storage medium config.toml: Storage medium to be used to exchange data between main server and remote worker nodes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting multinode_storage_medium", + "output": "multinode storage medium config.toml: Storage medium to be used to exchange data between main server and remote worker nodes." + }, + { + "prompt_type": "plain", + "instruction": ": What does worker_mode do? : worker mode config.toml: How the long running tasks are scheduled. multiprocessing: forks the current process immediately. singlenode: shares the task through redis and needs a worker running. 
multinode: same as singlenode and also shares the data through minio and allows worker to run on the different machine. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain worker_mode. : worker mode config.toml: How the long running tasks are scheduled. multiprocessing: forks the current process immediately. singlenode: shares the task through redis and needs a worker running. multinode: same as singlenode and also shares the data through minio and allows worker to run on the different machine. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_mode", + "output": "worker mode config.toml: How the long running tasks are scheduled. multiprocessing: forks the current process immediately. singlenode: shares the task through redis and needs a worker running. multinode: same as singlenode and also shares the data through minio and allows worker to run on the different machine. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_mode", + "output": "worker mode config.toml: How the long running tasks are scheduled. multiprocessing: forks the current process immediately. singlenode: shares the task through redis and needs a worker running. multinode: same as singlenode and also shares the data through minio and allows worker to run on the different machine. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker mode", + "output": "worker mode config.toml: How the long running tasks are scheduled. multiprocessing: forks the current process immediately. singlenode: shares the task through redis and needs a worker running. multinode: same as singlenode and also shares the data through minio and allows worker to run on the different machine. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "worker mode config.toml: How the long running tasks are scheduled. multiprocessing: forks the current process immediately. singlenode: shares the task through redis and needs a worker running. multinode: same as singlenode and also shares the data through minio and allows worker to run on the different machine. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting worker_mode", + "output": "worker mode config.toml: How the long running tasks are scheduled. multiprocessing: forks the current process immediately. singlenode: shares the task through redis and needs a worker running. multinode: same as singlenode and also shares the data through minio and allows worker to run on the different machine. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting worker_mode", + "output": "worker mode config.toml: How the long running tasks are scheduled. multiprocessing: forks the current process immediately. singlenode: shares the task through redis and needs a worker running. multinode: same as singlenode and also shares the data through minio and allows worker to run on the different machine. " + }, + { + "prompt_type": "plain", + "instruction": ": What does redis_ip do? : redis ip config.toml: Redis settings" + }, + { + "prompt_type": "plain", + "instruction": ": Explain redis_ip. 
: redis ip config.toml: Redis settings" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis_ip", + "output": "redis ip config.toml: Redis settings" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis_ip", + "output": "redis ip config.toml: Redis settings" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis ip", + "output": "redis ip config.toml: Redis settings" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "redis ip config.toml: Redis settings" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting redis_ip", + "output": "redis ip config.toml: Redis settings" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting redis_ip", + "output": "redis ip config.toml: Redis settings" + }, + { + "prompt_type": "plain", + "instruction": ": What does redis_port do? : redis port config.toml: Redis settings" + }, + { + "prompt_type": "plain", + "instruction": ": Explain redis_port. 
: redis port config.toml: Redis settings" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis_port", + "output": "redis port config.toml: Redis settings" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis_port", + "output": "redis port config.toml: Redis settings" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis port", + "output": "redis port config.toml: Redis settings" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "redis port config.toml: Redis settings" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting redis_port", + "output": "redis port config.toml: Redis settings" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting redis_port", + "output": "redis port config.toml: Redis settings" + }, + { + "prompt_type": "plain", + "instruction": ": What does redis_db do? : redis db config.toml: Redis database. Each DAI instance running on the redis server should have unique integer." + }, + { + "prompt_type": "plain", + "instruction": ": Explain redis_db. : redis db config.toml: Redis database. Each DAI instance running on the redis server should have unique integer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis_db", + "output": "redis db config.toml: Redis database. Each DAI instance running on the redis server should have unique integer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis_db", + "output": "redis db config.toml: Redis database. 
Each DAI instance running on the redis server should have unique integer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis db", + "output": "redis db config.toml: Redis database. Each DAI instance running on the redis server should have unique integer." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "redis db config.toml: Redis database. Each DAI instance running on the redis server should have unique integer." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting redis_db", + "output": "redis db config.toml: Redis database. Each DAI instance running on the redis server should have unique integer." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting redis_db", + "output": "redis db config.toml: Redis database. Each DAI instance running on the redis server should have unique integer." + }, + { + "prompt_type": "plain", + "instruction": ": What does main_server_redis_password do? : main server redis password config.toml: Redis password. Will be randomly generated main server startup, and by default it will show up in config file uncommented.If you are running more than one DriverlessAI instance per system, make sure each and every instance is connected to its own redis queue." + }, + { + "prompt_type": "plain", + "instruction": ": Explain main_server_redis_password. : main server redis password config.toml: Redis password. Will be randomly generated main server startup, and by default it will show up in config file uncommented.If you are running more than one DriverlessAI instance per system, make sure each and every instance is connected to its own redis queue." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_redis_password", + "output": "main server redis password config.toml: Redis password. Will be randomly generated main server startup, and by default it will show up in config file uncommented.If you are running more than one DriverlessAI instance per system, make sure each and every instance is connected to its own redis queue." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_redis_password", + "output": "main server redis password config.toml: Redis password. Will be randomly generated main server startup, and by default it will show up in config file uncommented.If you are running more than one DriverlessAI instance per system, make sure each and every instance is connected to its own redis queue." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main server redis password", + "output": "main server redis password config.toml: Redis password. Will be randomly generated main server startup, and by default it will show up in config file uncommented.If you are running more than one DriverlessAI instance per system, make sure each and every instance is connected to its own redis queue." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "main server redis password config.toml: Redis password. Will be randomly generated main server startup, and by default it will show up in config file uncommented.If you are running more than one DriverlessAI instance per system, make sure each and every instance is connected to its own redis queue." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting main_server_redis_password", + "output": "main server redis password config.toml: Redis password. Will be randomly generated main server startup, and by default it will show up in config file uncommented.If you are running more than one DriverlessAI instance per system, make sure each and every instance is connected to its own redis queue." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting main_server_redis_password", + "output": "main server redis password config.toml: Redis password. Will be randomly generated main server startup, and by default it will show up in config file uncommented.If you are running more than one DriverlessAI instance per system, make sure each and every instance is connected to its own redis queue." + }, + { + "prompt_type": "plain", + "instruction": ": What does redis_encrypt_config do? : redis encrypt config config.toml: If set to true, the config will get encrypted before it gets saved into the Redis database." + }, + { + "prompt_type": "plain", + "instruction": ": Explain redis_encrypt_config. : redis encrypt config config.toml: If set to true, the config will get encrypted before it gets saved into the Redis database." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis_encrypt_config", + "output": "redis encrypt config config.toml: If set to true, the config will get encrypted before it gets saved into the Redis database." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis_encrypt_config", + "output": "redis encrypt config config.toml: If set to true, the config will get encrypted before it gets saved into the Redis database." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis encrypt config", + "output": "redis encrypt config config.toml: If set to true, the config will get encrypted before it gets saved into the Redis database." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "redis encrypt config config.toml: If set to true, the config will get encrypted before it gets saved into the Redis database." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting redis_encrypt_config", + "output": "redis encrypt config config.toml: If set to true, the config will get encrypted before it gets saved into the Redis database." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting redis_encrypt_config", + "output": "redis encrypt config config.toml: If set to true, the config will get encrypted before it gets saved into the Redis database." + }, + { + "prompt_type": "plain", + "instruction": ": What does local_minio_port do? : local minio port config.toml: The port that Minio will listen on, this only takes effect if the current system is a multinode main server." + }, + { + "prompt_type": "plain", + "instruction": ": Explain local_minio_port. : local minio port config.toml: The port that Minio will listen on, this only takes effect if the current system is a multinode main server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "local_minio_port", + "output": "local minio port config.toml: The port that Minio will listen on, this only takes effect if the current system is a multinode main server." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "local_minio_port", + "output": "local minio port config.toml: The port that Minio will listen on, this only takes effect if the current system is a multinode main server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "local minio port", + "output": "local minio port config.toml: The port that Minio will listen on, this only takes effect if the current system is a multinode main server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "local minio port config.toml: The port that Minio will listen on, this only takes effect if the current system is a multinode main server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting local_minio_port", + "output": "local minio port config.toml: The port that Minio will listen on, this only takes effect if the current system is a multinode main server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting local_minio_port", + "output": "local minio port config.toml: The port that Minio will listen on, this only takes effect if the current system is a multinode main server." + }, + { + "prompt_type": "plain", + "instruction": ": What does main_server_minio_address do? : main server minio address config.toml: Location of main server's minio server." + }, + { + "prompt_type": "plain", + "instruction": ": Explain main_server_minio_address. : main server minio address config.toml: Location of main server's minio server." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_minio_address", + "output": "main server minio address config.toml: Location of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_minio_address", + "output": "main server minio address config.toml: Location of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main server minio address", + "output": "main server minio address config.toml: Location of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "main server minio address config.toml: Location of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting main_server_minio_address", + "output": "main server minio address config.toml: Location of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting main_server_minio_address", + "output": "main server minio address config.toml: Location of main server's minio server." + }, + { + "prompt_type": "plain", + "instruction": ": What does main_server_minio_access_key_id do? : main server minio access key id config.toml: Access key of main server's minio server." + }, + { + "prompt_type": "plain", + "instruction": ": Explain main_server_minio_access_key_id. : main server minio access key id config.toml: Access key of main server's minio server." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_minio_access_key_id", + "output": "main server minio access key id config.toml: Access key of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_minio_access_key_id", + "output": "main server minio access key id config.toml: Access key of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main server minio access key id", + "output": "main server minio access key id config.toml: Access key of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "main server minio access key id config.toml: Access key of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting main_server_minio_access_key_id", + "output": "main server minio access key id config.toml: Access key of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting main_server_minio_access_key_id", + "output": "main server minio access key id config.toml: Access key of main server's minio server." + }, + { + "prompt_type": "plain", + "instruction": ": What does main_server_minio_secret_access_key do? : main server minio secret access key config.toml: Secret access key of main server's minio server." + }, + { + "prompt_type": "plain", + "instruction": ": Explain main_server_minio_secret_access_key. : main server minio secret access key config.toml: Secret access key of main server's minio server." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_minio_secret_access_key", + "output": "main server minio secret access key config.toml: Secret access key of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_minio_secret_access_key", + "output": "main server minio secret access key config.toml: Secret access key of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main server minio secret access key", + "output": "main server minio secret access key config.toml: Secret access key of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "main server minio secret access key config.toml: Secret access key of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting main_server_minio_secret_access_key", + "output": "main server minio secret access key config.toml: Secret access key of main server's minio server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting main_server_minio_secret_access_key", + "output": "main server minio secret access key config.toml: Secret access key of main server's minio server." + }, + { + "prompt_type": "plain", + "instruction": ": What does main_server_minio_bucket do? : main server minio bucket config.toml: Name of minio bucket used for file synchronization." + }, + { + "prompt_type": "plain", + "instruction": ": Explain main_server_minio_bucket. : main server minio bucket config.toml: Name of minio bucket used for file synchronization." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_minio_bucket", + "output": "main server minio bucket config.toml: Name of minio bucket used for file synchronization." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_minio_bucket", + "output": "main server minio bucket config.toml: Name of minio bucket used for file synchronization." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main server minio bucket", + "output": "main server minio bucket config.toml: Name of minio bucket used for file synchronization." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "main server minio bucket config.toml: Name of minio bucket used for file synchronization." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting main_server_minio_bucket", + "output": "main server minio bucket config.toml: Name of minio bucket used for file synchronization." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting main_server_minio_bucket", + "output": "main server minio bucket config.toml: Name of minio bucket used for file synchronization." + }, + { + "prompt_type": "plain", + "instruction": ": What does main_server_s3_access_key_id do? : main server s3 access key id config.toml: S3 global access key." + }, + { + "prompt_type": "plain", + "instruction": ": Explain main_server_s3_access_key_id. : main server s3 access key id config.toml: S3 global access key." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_s3_access_key_id", + "output": "main server s3 access key id config.toml: S3 global access key." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_s3_access_key_id", + "output": "main server s3 access key id config.toml: S3 global access key." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main server s3 access key id", + "output": "main server s3 access key id config.toml: S3 global access key." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "main server s3 access key id config.toml: S3 global access key." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting main_server_s3_access_key_id", + "output": "main server s3 access key id config.toml: S3 global access key." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting main_server_s3_access_key_id", + "output": "main server s3 access key id config.toml: S3 global access key." + }, + { + "prompt_type": "plain", + "instruction": ": What does main_server_s3_secret_access_key do? : main server s3 secret access key config.toml: S3 global secret access key" + }, + { + "prompt_type": "plain", + "instruction": ": Explain main_server_s3_secret_access_key. 
: main server s3 secret access key config.toml: S3 global secret access key" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_s3_secret_access_key", + "output": "main server s3 secret access key config.toml: S3 global secret access key" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_s3_secret_access_key", + "output": "main server s3 secret access key config.toml: S3 global secret access key" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main server s3 secret access key", + "output": "main server s3 secret access key config.toml: S3 global secret access key" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "main server s3 secret access key config.toml: S3 global secret access key" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting main_server_s3_secret_access_key", + "output": "main server s3 secret access key config.toml: S3 global secret access key" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting main_server_s3_secret_access_key", + "output": "main server s3 secret access key config.toml: S3 global secret access key" + }, + { + "prompt_type": "plain", + "instruction": ": What does main_server_s3_bucket do? : main server s3 bucket config.toml: S3 bucket." + }, + { + "prompt_type": "plain", + "instruction": ": Explain main_server_s3_bucket. : main server s3 bucket config.toml: S3 bucket." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_s3_bucket", + "output": "main server s3 bucket config.toml: S3 bucket." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_s3_bucket", + "output": "main server s3 bucket config.toml: S3 bucket." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main server s3 bucket", + "output": "main server s3 bucket config.toml: S3 bucket." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "main server s3 bucket config.toml: S3 bucket." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting main_server_s3_bucket", + "output": "main server s3 bucket config.toml: S3 bucket." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting main_server_s3_bucket", + "output": "main server s3 bucket config.toml: S3 bucket." + }, + { + "prompt_type": "plain", + "instruction": ": What does worker_local_processors do? : worker local processors config.toml: Maximum number of local tasks processed at once, limited to no more than total number of physical (not virtual) cores divided by two (minimum of 1)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain worker_local_processors. : worker local processors config.toml: Maximum number of local tasks processed at once, limited to no more than total number of physical (not virtual) cores divided by two (minimum of 1)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_local_processors", + "output": "worker local processors config.toml: Maximum number of local tasks processed at once, limited to no more than total number of physical (not virtual) cores divided by two (minimum of 1)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_local_processors", + "output": "worker local processors config.toml: Maximum number of local tasks processed at once, limited to no more than total number of physical (not virtual) cores divided by two (minimum of 1)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker local processors", + "output": "worker local processors config.toml: Maximum number of local tasks processed at once, limited to no more than total number of physical (not virtual) cores divided by two (minimum of 1)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "worker local processors config.toml: Maximum number of local tasks processed at once, limited to no more than total number of physical (not virtual) cores divided by two (minimum of 1)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting worker_local_processors", + "output": "worker local processors config.toml: Maximum number of local tasks processed at once, limited to no more than total number of physical (not virtual) cores divided by two (minimum of 1)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting worker_local_processors", + "output": "worker local processors config.toml: Maximum number of local tasks processed at once, limited to no more than total number of physical (not virtual) cores divided by two (minimum of 1)." + }, + { + "prompt_type": "plain", + "instruction": ": What does worker_priority_queues_processors do? : worker priority queues processors config.toml: A concurrency limit for the 3 priority queues, only enabled when worker_remote_processors is greater than 0." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain worker_priority_queues_processors. : worker priority queues processors config.toml: A concurrency limit for the 3 priority queues, only enabled when worker_remote_processors is greater than 0." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_priority_queues_processors", + "output": "worker priority queues processors config.toml: A concurrency limit for the 3 priority queues, only enabled when worker_remote_processors is greater than 0." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_priority_queues_processors", + "output": "worker priority queues processors config.toml: A concurrency limit for the 3 priority queues, only enabled when worker_remote_processors is greater than 0." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker priority queues processors", + "output": "worker priority queues processors config.toml: A concurrency limit for the 3 priority queues, only enabled when worker_remote_processors is greater than 0." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "worker priority queues processors config.toml: A concurrency limit for the 3 priority queues, only enabled when worker_remote_processors is greater than 0." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting worker_priority_queues_processors", + "output": "worker priority queues processors config.toml: A concurrency limit for the 3 priority queues, only enabled when worker_remote_processors is greater than 0." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting worker_priority_queues_processors", + "output": "worker priority queues processors config.toml: A concurrency limit for the 3 priority queues, only enabled when worker_remote_processors is greater than 0." + }, + { + "prompt_type": "plain", + "instruction": ": What does worker_priority_queues_time_check do? : worker priority queues time check config.toml: A timeout before which a scheduled task is bumped up in priority" + }, + { + "prompt_type": "plain", + "instruction": ": Explain worker_priority_queues_time_check. : worker priority queues time check config.toml: A timeout before which a scheduled task is bumped up in priority" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_priority_queues_time_check", + "output": "worker priority queues time check config.toml: A timeout before which a scheduled task is bumped up in priority" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_priority_queues_time_check", + "output": "worker priority queues time check config.toml: A timeout before which a scheduled task is bumped up in priority" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker priority queues time check", + "output": "worker priority queues time check config.toml: A timeout before which a scheduled task is bumped up in priority" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "worker priority queues time check config.toml: A timeout before which a scheduled task is bumped up in priority" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting 
worker_priority_queues_time_check", + "output": "worker priority queues time check config.toml: A timeout before which a scheduled task is bumped up in priority" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting worker_priority_queues_time_check", + "output": "worker priority queues time check config.toml: A timeout before which a scheduled task is bumped up in priority" + }, + { + "prompt_type": "plain", + "instruction": ": What does worker_remote_processors do? : worker remote processors config.toml: Maximum number of remote tasks processed at once, if value is set to -1 the system will automatically pick a reasonable limit depending on the number of available virtual CPU cores." + }, + { + "prompt_type": "plain", + "instruction": ": Explain worker_remote_processors. : worker remote processors config.toml: Maximum number of remote tasks processed at once, if value is set to -1 the system will automatically pick a reasonable limit depending on the number of available virtual CPU cores." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_remote_processors", + "output": "worker remote processors config.toml: Maximum number of remote tasks processed at once, if value is set to -1 the system will automatically pick a reasonable limit depending on the number of available virtual CPU cores." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_remote_processors", + "output": "worker remote processors config.toml: Maximum number of remote tasks processed at once, if value is set to -1 the system will automatically pick a reasonable limit depending on the number of available virtual CPU cores." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker remote processors", + "output": "worker remote processors config.toml: Maximum number of remote tasks processed at once, if value is set to -1 the system will automatically pick a reasonable limit depending on the number of available virtual CPU cores." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "worker remote processors config.toml: Maximum number of remote tasks processed at once, if value is set to -1 the system will automatically pick a reasonable limit depending on the number of available virtual CPU cores." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting worker_remote_processors", + "output": "worker remote processors config.toml: Maximum number of remote tasks processed at once, if value is set to -1 the system will automatically pick a reasonable limit depending on the number of available virtual CPU cores." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting worker_remote_processors", + "output": "worker remote processors config.toml: Maximum number of remote tasks processed at once, if value is set to -1 the system will automatically pick a reasonable limit depending on the number of available virtual CPU cores." + }, + { + "prompt_type": "plain", + "instruction": ": What does worker_remote_processors_max_threads_reduction_factor do? : worker remote processors max threads reduction factor config.toml: If worker_remote_processors >= 3, factor by which each task reduces threads, used by various packages like datatable, lightgbm, xgboost, etc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain worker_remote_processors_max_threads_reduction_factor. 
: worker remote processors max threads reduction factor config.toml: If worker_remote_processors >= 3, factor by which each task reduces threads, used by various packages like datatable, lightgbm, xgboost, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_remote_processors_max_threads_reduction_factor", + "output": "worker remote processors max threads reduction factor config.toml: If worker_remote_processors >= 3, factor by which each task reduces threads, used by various packages like datatable, lightgbm, xgboost, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_remote_processors_max_threads_reduction_factor", + "output": "worker remote processors max threads reduction factor config.toml: If worker_remote_processors >= 3, factor by which each task reduces threads, used by various packages like datatable, lightgbm, xgboost, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker remote processors max threads reduction factor", + "output": "worker remote processors max threads reduction factor config.toml: If worker_remote_processors >= 3, factor by which each task reduces threads, used by various packages like datatable, lightgbm, xgboost, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "worker remote processors max threads reduction factor config.toml: If worker_remote_processors >= 3, factor by which each task reduces threads, used by various packages like datatable, lightgbm, xgboost, etc." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting worker_remote_processors_max_threads_reduction_factor", + "output": "worker remote processors max threads reduction factor config.toml: If worker_remote_processors >= 3, factor by which each task reduces threads, used by various packages like datatable, lightgbm, xgboost, etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting worker_remote_processors_max_threads_reduction_factor", + "output": "worker remote processors max threads reduction factor config.toml: If worker_remote_processors >= 3, factor by which each task reduces threads, used by various packages like datatable, lightgbm, xgboost, etc." + }, + { + "prompt_type": "plain", + "instruction": ": What does multinode_tmpfs do? : multinode tmpfs config.toml: Temporary file system location for multinode data transfer. This has to be an absolute path with equivalent configuration on both the main server and remote workers." + }, + { + "prompt_type": "plain", + "instruction": ": Explain multinode_tmpfs. : multinode tmpfs config.toml: Temporary file system location for multinode data transfer. This has to be an absolute path with equivalent configuration on both the main server and remote workers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode_tmpfs", + "output": "multinode tmpfs config.toml: Temporary file system location for multinode data transfer. This has to be an absolute path with equivalent configuration on both the main server and remote workers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode_tmpfs", + "output": "multinode tmpfs config.toml: Temporary file system location for multinode data transfer. 
This has to be an absolute path with equivalent configuration on both the main server and remote workers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode tmpfs", + "output": "multinode tmpfs config.toml: Temporary file system location for multinode data transfer. This has to be an absolute path with equivalent configuration on both the main server and remote workers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "multinode tmpfs config.toml: Temporary file system location for multinode data transfer. This has to be an absolute path with equivalent configuration on both the main server and remote workers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting multinode_tmpfs", + "output": "multinode tmpfs config.toml: Temporary file system location for multinode data transfer. This has to be an absolute path with equivalent configuration on both the main server and remote workers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting multinode_tmpfs", + "output": "multinode tmpfs config.toml: Temporary file system location for multinode data transfer. This has to be an absolute path with equivalent configuration on both the main server and remote workers." + }, + { + "prompt_type": "plain", + "instruction": ": What does multinode_store_datasets_in_tmpfs do? : multinode store datasets in tmpfs config.toml: When set to true, will use the 'multinode_tmpfs' as datasets store." + }, + { + "prompt_type": "plain", + "instruction": ": Explain multinode_store_datasets_in_tmpfs. : multinode store datasets in tmpfs config.toml: When set to true, will use the 'multinode_tmpfs' as datasets store." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode_store_datasets_in_tmpfs", + "output": "multinode store datasets in tmpfs config.toml: When set to true, will use the 'multinode_tmpfs' as datasets store." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode_store_datasets_in_tmpfs", + "output": "multinode store datasets in tmpfs config.toml: When set to true, will use the 'multinode_tmpfs' as datasets store." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "multinode store datasets in tmpfs", + "output": "multinode store datasets in tmpfs config.toml: When set to true, will use the 'multinode_tmpfs' as datasets store." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "multinode store datasets in tmpfs config.toml: When set to true, will use the 'multinode_tmpfs' as datasets store." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting multinode_store_datasets_in_tmpfs", + "output": "multinode store datasets in tmpfs config.toml: When set to true, will use the 'multinode_tmpfs' as datasets store." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting multinode_store_datasets_in_tmpfs", + "output": "multinode store datasets in tmpfs config.toml: When set to true, will use the 'multinode_tmpfs' as datasets store." + }, + { + "prompt_type": "plain", + "instruction": ": What does redis_result_queue_polling_interval do? : redis result queue polling interval config.toml: How often the server should extract results from redis queue in milliseconds." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain redis_result_queue_polling_interval. : redis result queue polling interval config.toml: How often the server should extract results from redis queue in milliseconds." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis_result_queue_polling_interval", + "output": "redis result queue polling interval config.toml: How often the server should extract results from redis queue in milliseconds." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis_result_queue_polling_interval", + "output": "redis result queue polling interval config.toml: How often the server should extract results from redis queue in milliseconds." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "redis result queue polling interval", + "output": "redis result queue polling interval config.toml: How often the server should extract results from redis queue in milliseconds." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "redis result queue polling interval config.toml: How often the server should extract results from redis queue in milliseconds." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting redis_result_queue_polling_interval", + "output": "redis result queue polling interval config.toml: How often the server should extract results from redis queue in milliseconds." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting redis_result_queue_polling_interval", + "output": "redis result queue polling interval config.toml: How often the server should extract results from redis queue in milliseconds." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does worker_sleep do? : worker sleep config.toml: Sleep time for worker loop." + }, + { + "prompt_type": "plain", + "instruction": ": Explain worker_sleep. : worker sleep config.toml: Sleep time for worker loop." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_sleep", + "output": "worker sleep config.toml: Sleep time for worker loop." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_sleep", + "output": "worker sleep config.toml: Sleep time for worker loop." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker sleep", + "output": "worker sleep config.toml: Sleep time for worker loop." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "worker sleep config.toml: Sleep time for worker loop." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting worker_sleep", + "output": "worker sleep config.toml: Sleep time for worker loop." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting worker_sleep", + "output": "worker sleep config.toml: Sleep time for worker loop." + }, + { + "prompt_type": "plain", + "instruction": ": What does main_server_minio_bucket_ping_timeout do? : main server minio bucket ping timeout config.toml: For how many seconds worker should wait for main server minio bucket before it fails" + }, + { + "prompt_type": "plain", + "instruction": ": Explain main_server_minio_bucket_ping_timeout. 
: main server minio bucket ping timeout config.toml: For how many seconds worker should wait for main server minio bucket before it fails" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_minio_bucket_ping_timeout", + "output": "main server minio bucket ping timeout config.toml: For how many seconds worker should wait for main server minio bucket before it fails" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_minio_bucket_ping_timeout", + "output": "main server minio bucket ping timeout config.toml: For how many seconds worker should wait for main server minio bucket before it fails" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main server minio bucket ping timeout", + "output": "main server minio bucket ping timeout config.toml: For how many seconds worker should wait for main server minio bucket before it fails" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "main server minio bucket ping timeout config.toml: For how many seconds worker should wait for main server minio bucket before it fails" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting main_server_minio_bucket_ping_timeout", + "output": "main server minio bucket ping timeout config.toml: For how many seconds worker should wait for main server minio bucket before it fails" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting main_server_minio_bucket_ping_timeout", + "output": "main server minio bucket ping timeout config.toml: For how many seconds worker should wait for main server minio bucket before it fails" + }, + { + "prompt_type": "plain", + 
"instruction": ": What does worker_start_timeout do? : worker start timeout config.toml: How long the worker should wait on redis db initialization in seconds." + }, + { + "prompt_type": "plain", + "instruction": ": Explain worker_start_timeout. : worker start timeout config.toml: How long the worker should wait on redis db initialization in seconds." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_start_timeout", + "output": "worker start timeout config.toml: How long the worker should wait on redis db initialization in seconds." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_start_timeout", + "output": "worker start timeout config.toml: How long the worker should wait on redis db initialization in seconds." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker start timeout", + "output": "worker start timeout config.toml: How long the worker should wait on redis db initialization in seconds." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "worker start timeout config.toml: How long the worker should wait on redis db initialization in seconds." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting worker_start_timeout", + "output": "worker start timeout config.toml: How long the worker should wait on redis db initialization in seconds." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting worker_start_timeout", + "output": "worker start timeout config.toml: How long the worker should wait on redis db initialization in seconds." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does worker_healthy_response_period do? : worker healthy response period config.toml: For how many seconds the worker shouldn't respond to be marked unhealthy." + }, + { + "prompt_type": "plain", + "instruction": ": Explain worker_healthy_response_period. : worker healthy response period config.toml: For how many seconds the worker shouldn't respond to be marked unhealthy." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_healthy_response_period", + "output": "worker healthy response period config.toml: For how many seconds the worker shouldn't respond to be marked unhealthy." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker_healthy_response_period", + "output": "worker healthy response period config.toml: For how many seconds the worker shouldn't respond to be marked unhealthy." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "worker healthy response period", + "output": "worker healthy response period config.toml: For how many seconds the worker shouldn't respond to be marked unhealthy." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "worker healthy response period config.toml: For how many seconds the worker shouldn't respond to be marked unhealthy." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting worker_healthy_response_period", + "output": "worker healthy response period config.toml: For how many seconds the worker shouldn't respond to be marked unhealthy." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting worker_healthy_response_period", + "output": "worker healthy response period config.toml: For how many seconds the worker shouldn't respond to be marked unhealthy." + }, + { + "prompt_type": "plain", + "instruction": ": What does expose_server_version do? : expose server version config.toml: Exposes the DriverlessAI base version when enabled." + }, + { + "prompt_type": "plain", + "instruction": ": Explain expose_server_version. : expose server version config.toml: Exposes the DriverlessAI base version when enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "expose_server_version", + "output": "expose server version config.toml: Exposes the DriverlessAI base version when enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "expose_server_version", + "output": "expose server version config.toml: Exposes the DriverlessAI base version when enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "expose server version", + "output": "expose server version config.toml: Exposes the DriverlessAI base version when enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "expose server version config.toml: Exposes the DriverlessAI base version when enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting expose_server_version", + "output": "expose server version config.toml: Exposes the DriverlessAI base version when enabled." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting expose_server_version", + "output": "expose server version config.toml: Exposes the DriverlessAI base version when enabled." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_https do? : enable https config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_https. : enable https config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_https", + "output": "enable https config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_https", + "output": "enable https config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem 
-out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable https", + "output": "enable https config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable https config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_https", + "output": "enable https config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_https", + "output": "enable https config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req 
-x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "plain", + "instruction": ": What does ssl_key_file do? : ssl key file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ssl_key_file. : ssl key file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_key_file", + "output": "ssl key file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_key_file", + "output": "ssl key file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out 
cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl key file", + "output": "ssl key file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ssl key file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ssl_key_file", + "output": "ssl key file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ssl_key_file", + "output": "ssl key file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 
-newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "plain", + "instruction": ": What does ssl_crt_file do? : ssl crt file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ssl_crt_file. : ssl crt file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_crt_file", + "output": "ssl crt file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_crt_file", + "output": "ssl crt file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out 
cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl crt file", + "output": "ssl crt file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ssl crt file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ssl_crt_file", + "output": "ssl crt file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 -newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ssl_crt_file", + "output": "ssl crt file config.toml: https settings You can make a self-signed certificate for testing with the following commands: sudo openssl req -x509 
-newkey rsa:4096 -keyout private_key.pem -out cert.pem -days 3650 -nodes -subj '/O=Driverless AI' sudo chown dai:dai cert.pem private_key.pem sudo chmod 600 cert.pem private_key.pem sudo mv cert.pem private_key.pem /etc/dai" + }, + { + "prompt_type": "plain", + "instruction": ": What does ssl_key_passphrase do? : ssl key passphrase config.toml: https settings Passphrase for the ssl_key_file, either use this setting or ssl_key_passphrase_file, or neither if no passphrase is used." + }, + { + "prompt_type": "plain", + "instruction": ": Explain ssl_key_passphrase. : ssl key passphrase config.toml: https settings Passphrase for the ssl_key_file, either use this setting or ssl_key_passphrase_file, or neither if no passphrase is used." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_key_passphrase", + "output": "ssl key passphrase config.toml: https settings Passphrase for the ssl_key_file, either use this setting or ssl_key_passphrase_file, or neither if no passphrase is used." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_key_passphrase", + "output": "ssl key passphrase config.toml: https settings Passphrase for the ssl_key_file, either use this setting or ssl_key_passphrase_file, or neither if no passphrase is used." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl key passphrase", + "output": "ssl key passphrase config.toml: https settings Passphrase for the ssl_key_file, either use this setting or ssl_key_passphrase_file, or neither if no passphrase is used." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ssl key passphrase config.toml: https settings Passphrase for the ssl_key_file, either use this setting or ssl_key_passphrase_file, or neither if no passphrase is used." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ssl_key_passphrase", + "output": "ssl key passphrase config.toml: https settings Passphrase for the ssl_key_file, either use this setting or ssl_key_passphrase_file, or neither if no passphrase is used." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ssl_key_passphrase", + "output": "ssl key passphrase config.toml: https settings Passphrase for the ssl_key_file, either use this setting or ssl_key_passphrase_file, or neither if no passphrase is used." + }, + { + "prompt_type": "plain", + "instruction": ": What does ssl_key_passphrase_file do? : ssl key passphrase file config.toml: https settings Passphrase file for the ssl_key_file, either use this setting or ssl_key_passphrase, or neither if no passphrase is used." + }, + { + "prompt_type": "plain", + "instruction": ": Explain ssl_key_passphrase_file. : ssl key passphrase file config.toml: https settings Passphrase file for the ssl_key_file, either use this setting or ssl_key_passphrase, or neither if no passphrase is used." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_key_passphrase_file", + "output": "ssl key passphrase file config.toml: https settings Passphrase file for the ssl_key_file, either use this setting or ssl_key_passphrase, or neither if no passphrase is used." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_key_passphrase_file", + "output": "ssl key passphrase file config.toml: https settings Passphrase file for the ssl_key_file, either use this setting or ssl_key_passphrase, or neither if no passphrase is used." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl key passphrase file", + "output": "ssl key passphrase file config.toml: https settings Passphrase file for the ssl_key_file, either use this setting or ssl_key_passphrase, or neither if no passphrase is used." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ssl key passphrase file config.toml: https settings Passphrase file for the ssl_key_file, either use this setting or ssl_key_passphrase, or neither if no passphrase is used." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ssl_key_passphrase_file", + "output": "ssl key passphrase file config.toml: https settings Passphrase file for the ssl_key_file, either use this setting or ssl_key_passphrase, or neither if no passphrase is used." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ssl_key_passphrase_file", + "output": "ssl key passphrase file config.toml: https settings Passphrase file for the ssl_key_file, either use this setting or ssl_key_passphrase, or neither if no passphrase is used." + }, + { + "prompt_type": "plain", + "instruction": ": What does ssl_no_sslv2 do? : ssl no sslv2 config.toml: SSL TLS" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ssl_no_sslv2. 
: ssl no sslv2 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_no_sslv2", + "output": "ssl no sslv2 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_no_sslv2", + "output": "ssl no sslv2 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl no sslv2", + "output": "ssl no sslv2 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ssl no sslv2 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ssl_no_sslv2", + "output": "ssl no sslv2 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ssl_no_sslv2", + "output": "ssl no sslv2 config.toml: SSL TLS" + }, + { + "prompt_type": "plain", + "instruction": ": What does ssl_no_sslv3 do? : ssl no sslv3 config.toml: SSL TLS" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ssl_no_sslv3. 
: ssl no sslv3 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_no_sslv3", + "output": "ssl no sslv3 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_no_sslv3", + "output": "ssl no sslv3 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl no sslv3", + "output": "ssl no sslv3 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ssl no sslv3 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ssl_no_sslv3", + "output": "ssl no sslv3 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ssl_no_sslv3", + "output": "ssl no sslv3 config.toml: SSL TLS" + }, + { + "prompt_type": "plain", + "instruction": ": What does ssl_no_tlsv1 do? : ssl no tlsv1 config.toml: SSL TLS" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ssl_no_tlsv1. 
: ssl no tlsv1 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_no_tlsv1", + "output": "ssl no tlsv1 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_no_tlsv1", + "output": "ssl no tlsv1 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl no tlsv1", + "output": "ssl no tlsv1 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ssl no tlsv1 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ssl_no_tlsv1", + "output": "ssl no tlsv1 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ssl_no_tlsv1", + "output": "ssl no tlsv1 config.toml: SSL TLS" + }, + { + "prompt_type": "plain", + "instruction": ": What does ssl_no_tlsv1_1 do? : ssl no tlsv1 1 config.toml: SSL TLS" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ssl_no_tlsv1_1. 
: ssl no tlsv1 1 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_no_tlsv1_1", + "output": "ssl no tlsv1 1 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_no_tlsv1_1", + "output": "ssl no tlsv1 1 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl no tlsv1 1", + "output": "ssl no tlsv1 1 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ssl no tlsv1 1 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ssl_no_tlsv1_1", + "output": "ssl no tlsv1 1 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ssl_no_tlsv1_1", + "output": "ssl no tlsv1 1 config.toml: SSL TLS" + }, + { + "prompt_type": "plain", + "instruction": ": What does ssl_no_tlsv1_2 do? : ssl no tlsv1 2 config.toml: SSL TLS" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ssl_no_tlsv1_2. 
: ssl no tlsv1 2 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_no_tlsv1_2", + "output": "ssl no tlsv1 2 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_no_tlsv1_2", + "output": "ssl no tlsv1 2 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl no tlsv1 2", + "output": "ssl no tlsv1 2 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ssl no tlsv1 2 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ssl_no_tlsv1_2", + "output": "ssl no tlsv1 2 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ssl_no_tlsv1_2", + "output": "ssl no tlsv1 2 config.toml: SSL TLS" + }, + { + "prompt_type": "plain", + "instruction": ": What does ssl_no_tlsv1_3 do? : ssl no tlsv1 3 config.toml: SSL TLS" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ssl_no_tlsv1_3. 
: ssl no tlsv1 3 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_no_tlsv1_3", + "output": "ssl no tlsv1 3 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_no_tlsv1_3", + "output": "ssl no tlsv1 3 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl no tlsv1 3", + "output": "ssl no tlsv1 3 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ssl no tlsv1 3 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ssl_no_tlsv1_3", + "output": "ssl no tlsv1 3 config.toml: SSL TLS" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ssl_no_tlsv1_3", + "output": "ssl no tlsv1 3 config.toml: SSL TLS" + }, + { + "prompt_type": "plain", + "instruction": ": What does ssl_client_verify_mode do? : ssl client verify mode config.toml: https settings Sets the client verification mode. CERT_NONE: Client does not need to provide the certificate and if it does any verification errors are ignored. CERT_OPTIONAL: Client does not need to provide the certificate and if it does certificate is verified against set up CA chains. CERT_REQUIRED: Client needs to provide a certificate and certificate is verified. You'll need to set 'ssl_client_key_file' and 'ssl_client_crt_file' When this mode is selected for Driverless to be able to verify it's own callback requests. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain ssl_client_verify_mode. : ssl client verify mode config.toml: https settings Sets the client verification mode. 
CERT_NONE: Client does not need to provide the certificate and if it does any verification errors are ignored. CERT_OPTIONAL: Client does not need to provide the certificate and if it does certificate is verified against set up CA chains. CERT_REQUIRED: Client needs to provide a certificate and certificate is verified. You'll need to set 'ssl_client_key_file' and 'ssl_client_crt_file' When this mode is selected for Driverless to be able to verify it's own callback requests. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_client_verify_mode", + "output": "ssl client verify mode config.toml: https settings Sets the client verification mode. CERT_NONE: Client does not need to provide the certificate and if it does any verification errors are ignored. CERT_OPTIONAL: Client does not need to provide the certificate and if it does certificate is verified against set up CA chains. CERT_REQUIRED: Client needs to provide a certificate and certificate is verified. You'll need to set 'ssl_client_key_file' and 'ssl_client_crt_file' When this mode is selected for Driverless to be able to verify it's own callback requests. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_client_verify_mode", + "output": "ssl client verify mode config.toml: https settings Sets the client verification mode. CERT_NONE: Client does not need to provide the certificate and if it does any verification errors are ignored. CERT_OPTIONAL: Client does not need to provide the certificate and if it does certificate is verified against set up CA chains. CERT_REQUIRED: Client needs to provide a certificate and certificate is verified. You'll need to set 'ssl_client_key_file' and 'ssl_client_crt_file' When this mode is selected for Driverless to be able to verify it's own callback requests. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl client verify mode", + "output": "ssl client verify mode config.toml: https settings Sets the client verification mode. CERT_NONE: Client does not need to provide the certificate and if it does any verification errors are ignored. CERT_OPTIONAL: Client does not need to provide the certificate and if it does certificate is verified against set up CA chains. CERT_REQUIRED: Client needs to provide a certificate and certificate is verified. You'll need to set 'ssl_client_key_file' and 'ssl_client_crt_file' When this mode is selected for Driverless to be able to verify it's own callback requests. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ssl client verify mode config.toml: https settings Sets the client verification mode. CERT_NONE: Client does not need to provide the certificate and if it does any verification errors are ignored. CERT_OPTIONAL: Client does not need to provide the certificate and if it does certificate is verified against set up CA chains. CERT_REQUIRED: Client needs to provide a certificate and certificate is verified. You'll need to set 'ssl_client_key_file' and 'ssl_client_crt_file' When this mode is selected for Driverless to be able to verify it's own callback requests. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ssl_client_verify_mode", + "output": "ssl client verify mode config.toml: https settings Sets the client verification mode. CERT_NONE: Client does not need to provide the certificate and if it does any verification errors are ignored. CERT_OPTIONAL: Client does not need to provide the certificate and if it does certificate is verified against set up CA chains. CERT_REQUIRED: Client needs to provide a certificate and certificate is verified. 
You'll need to set 'ssl_client_key_file' and 'ssl_client_crt_file' When this mode is selected for Driverless to be able to verify it's own callback requests. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ssl_client_verify_mode", + "output": "ssl client verify mode config.toml: https settings Sets the client verification mode. CERT_NONE: Client does not need to provide the certificate and if it does any verification errors are ignored. CERT_OPTIONAL: Client does not need to provide the certificate and if it does certificate is verified against set up CA chains. CERT_REQUIRED: Client needs to provide a certificate and certificate is verified. You'll need to set 'ssl_client_key_file' and 'ssl_client_crt_file' When this mode is selected for Driverless to be able to verify it's own callback requests. " + }, + { + "prompt_type": "plain", + "instruction": ": What does ssl_ca_file do? : ssl ca file config.toml: https settings Path to the Certification Authority certificate file. This certificate will be used when to verify client certificate when client authentication is turned on. If this is not set, clients are verified using default system certificates. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain ssl_ca_file. : ssl ca file config.toml: https settings Path to the Certification Authority certificate file. This certificate will be used when to verify client certificate when client authentication is turned on. If this is not set, clients are verified using default system certificates. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_ca_file", + "output": "ssl ca file config.toml: https settings Path to the Certification Authority certificate file. This certificate will be used when to verify client certificate when client authentication is turned on. 
If this is not set, clients are verified using default system certificates. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_ca_file", + "output": "ssl ca file config.toml: https settings Path to the Certification Authority certificate file. This certificate will be used when to verify client certificate when client authentication is turned on. If this is not set, clients are verified using default system certificates. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl ca file", + "output": "ssl ca file config.toml: https settings Path to the Certification Authority certificate file. This certificate will be used when to verify client certificate when client authentication is turned on. If this is not set, clients are verified using default system certificates. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ssl ca file config.toml: https settings Path to the Certification Authority certificate file. This certificate will be used when to verify client certificate when client authentication is turned on. If this is not set, clients are verified using default system certificates. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ssl_ca_file", + "output": "ssl ca file config.toml: https settings Path to the Certification Authority certificate file. This certificate will be used when to verify client certificate when client authentication is turned on. If this is not set, clients are verified using default system certificates. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ssl_ca_file", + "output": "ssl ca file config.toml: https settings Path to the Certification Authority certificate file. 
This certificate will be used when to verify client certificate when client authentication is turned on. If this is not set, clients are verified using default system certificates. " + }, + { + "prompt_type": "plain", + "instruction": ": What does ssl_client_key_file do? : ssl client key file config.toml: https settings path to the private key that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain ssl_client_key_file. : ssl client key file config.toml: https settings path to the private key that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_client_key_file", + "output": "ssl client key file config.toml: https settings path to the private key that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_client_key_file", + "output": "ssl client key file config.toml: https settings path to the private key that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl client key file", + "output": "ssl client key file config.toml: https settings path to the private key that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ssl client key file config.toml: https settings path to the private key that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ssl_client_key_file", + "output": "ssl client key file config.toml: https settings path to the private key that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ssl_client_key_file", + "output": "ssl client key file config.toml: https settings path to the private key that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. " + }, + { + "prompt_type": "plain", + "instruction": ": What does ssl_client_crt_file do? : ssl client crt file config.toml: https settings path to the client certificate that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain ssl_client_crt_file. : ssl client crt file config.toml: https settings path to the client certificate that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_client_crt_file", + "output": "ssl client crt file config.toml: https settings path to the client certificate that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl_client_crt_file", + "output": "ssl client crt file config.toml: https settings path to the client certificate that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ssl client crt file", + "output": "ssl client crt file config.toml: https settings path to the client certificate that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ssl client crt file config.toml: https settings path to the client certificate that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ssl_client_crt_file", + "output": "ssl client crt file config.toml: https settings path to the client certificate that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ssl_client_crt_file", + "output": "ssl client crt file config.toml: https settings path to the client certificate that Driverless will use to authenticate itself when CERT_REQUIRED mode is set. " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_xsrf_protection do? : enable xsrf protection config.toml: If enabled, webserver will serve xsrf cookies and verify their validity upon every POST request" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_xsrf_protection. : enable xsrf protection config.toml: If enabled, webserver will serve xsrf cookies and verify their validity upon every POST request" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable XSRF Webserver protection: . 
: Set the enable xsrf protection config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xsrf_protection", + "output": "enable xsrf protection config.toml: If enabled, webserver will serve xsrf cookies and verify their validity upon every POST request" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_xsrf_protection", + "output": "enable xsrf protection config.toml: Enable XSRF Webserver protection: If enabled, webserver will serve xsrf cookies and verify their validity upon every POST request" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable xsrf protection", + "output": "enable xsrf protection config.toml: Enable XSRF Webserver protection: If enabled, webserver will serve xsrf cookies and verify their validity upon every POST request" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable XSRF Webserver protection: ", + "output": "enable xsrf protection config.toml: Enable XSRF Webserver protection: If enabled, webserver will serve xsrf cookies and verify their validity upon every POST request" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_xsrf_protection", + "output": "enable xsrf protection config.toml: If enabled, webserver will serve xsrf cookies and verify their validity upon every POST request" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_xsrf_protection", + "output": "enable xsrf protection config.toml: Enable XSRF Webserver protection: If enabled, webserver will serve xsrf cookies and verify their validity upon every POST request" + }, + { + "prompt_type": "plain", + "instruction": ": 
What does enable_secure_cookies do? : enable secure cookies config.toml: Enable secure flag on HTTP cookies: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_secure_cookies. : enable secure cookies config.toml: Enable secure flag on HTTP cookies: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_secure_cookies", + "output": "enable secure cookies config.toml: Enable secure flag on HTTP cookies: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_secure_cookies", + "output": "enable secure cookies config.toml: Enable secure flag on HTTP cookies: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable secure cookies", + "output": "enable secure cookies config.toml: Enable secure flag on HTTP cookies: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable secure flag on HTTP cookies: ", + "output": "enable secure cookies config.toml: Enable secure flag on HTTP cookies: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_secure_cookies", + "output": "enable secure cookies config.toml: Enable secure flag on HTTP cookies: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_secure_cookies", + "output": "enable secure cookies config.toml: Enable secure flag on HTTP cookies: " + }, + { + "prompt_type": "plain", + "instruction": ": What does verify_session_ip do? : verify session ip config.toml: When enabled each authenticated access will be verified comparing IP address of initiator of session and current request IP" + }, + { + "prompt_type": "plain", + "instruction": ": Explain verify_session_ip. 
: verify session ip config.toml: When enabled each authenticated access will be verified comparing IP address of initiator of session and current request IP" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: When enabled, webserver verifies session and request IP address: . : Set the verify session ip config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "verify_session_ip", + "output": "verify session ip config.toml: When enabled each authenticated access will be verified comparing IP address of initiator of session and current request IP" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "verify_session_ip", + "output": "verify session ip config.toml: When enabled, webserver verifies session and request IP address: When enabled each authenticated access will be verified comparing IP address of initiator of session and current request IP" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "verify session ip", + "output": "verify session ip config.toml: When enabled, webserver verifies session and request IP address: When enabled each authenticated access will be verified comparing IP address of initiator of session and current request IP" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "When enabled, webserver verifies session and request IP address: ", + "output": "verify session ip config.toml: When enabled, webserver verifies session and request IP address: When enabled each authenticated access will be verified comparing IP address of initiator of session and current request IP" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting verify_session_ip", + "output": "verify session 
ip config.toml: When enabled each authenticated access will be verified comparing IP address of initiator of session and current request IP" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting verify_session_ip", + "output": "verify session ip config.toml: When enabled, webserver verifies session and request IP address: When enabled each authenticated access will be verified comparing IP address of initiator of session and current request IP" + }, + { + "prompt_type": "plain", + "instruction": ": What does custom_recipe_security_analysis_enabled do? : custom recipe security analysis enabled config.toml: Enables automatic detection for forbidden/dangerous constructs in custom recipe" + }, + { + "prompt_type": "plain", + "instruction": ": Explain custom_recipe_security_analysis_enabled. : custom recipe security analysis enabled config.toml: Enables automatic detection for forbidden/dangerous constructs in custom recipe" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipe_security_analysis_enabled", + "output": "custom recipe security analysis enabled config.toml: Enables automatic detection for forbidden/dangerous constructs in custom recipe" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipe_security_analysis_enabled", + "output": "custom recipe security analysis enabled config.toml: Enables automatic detection for forbidden/dangerous constructs in custom recipe" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom recipe security analysis enabled", + "output": "custom recipe security analysis enabled config.toml: Enables automatic detection for forbidden/dangerous constructs in custom recipe" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain 
the following expert setting for Driverless AI", + "input": "", + "output": "custom recipe security analysis enabled config.toml: Enables automatic detection for forbidden/dangerous constructs in custom recipe" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting custom_recipe_security_analysis_enabled", + "output": "custom recipe security analysis enabled config.toml: Enables automatic detection for forbidden/dangerous constructs in custom recipe" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting custom_recipe_security_analysis_enabled", + "output": "custom recipe security analysis enabled config.toml: Enables automatic detection for forbidden/dangerous constructs in custom recipe" + }, + { + "prompt_type": "plain", + "instruction": ": What does custom_recipe_import_allowlist do? : custom recipe import allowlist config.toml: List of modules that can be imported in custom recipes. Default empty list means all modules are allowed except for banlisted ones" + }, + { + "prompt_type": "plain", + "instruction": ": Explain custom_recipe_import_allowlist. : custom recipe import allowlist config.toml: List of modules that can be imported in custom recipes. Default empty list means all modules are allowed except for banlisted ones" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipe_import_allowlist", + "output": "custom recipe import allowlist config.toml: List of modules that can be imported in custom recipes. Default empty list means all modules are allowed except for banlisted ones" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipe_import_allowlist", + "output": "custom recipe import allowlist config.toml: List of modules that can be imported in custom recipes. 
Default empty list means all modules are allowed except for banlisted ones" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom recipe import allowlist", + "output": "custom recipe import allowlist config.toml: List of modules that can be imported in custom recipes. Default empty list means all modules are allowed except for banlisted ones" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "custom recipe import allowlist config.toml: List of modules that can be imported in custom recipes. Default empty list means all modules are allowed except for banlisted ones" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting custom_recipe_import_allowlist", + "output": "custom recipe import allowlist config.toml: List of modules that can be imported in custom recipes. Default empty list means all modules are allowed except for banlisted ones" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting custom_recipe_import_allowlist", + "output": "custom recipe import allowlist config.toml: List of modules that can be imported in custom recipes. Default empty list means all modules are allowed except for banlisted ones" + }, + { + "prompt_type": "plain", + "instruction": ": What does custom_recipe_import_banlist do? : custom recipe import banlist config.toml: List of modules that cannot be imported in custom recipes" + }, + { + "prompt_type": "plain", + "instruction": ": Explain custom_recipe_import_banlist. 
: custom recipe import banlist config.toml: List of modules that cannot be imported in custom recipes" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipe_import_banlist", + "output": "custom recipe import banlist config.toml: List of modules that cannot be imported in custom recipes" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipe_import_banlist", + "output": "custom recipe import banlist config.toml: List of modules that cannot be imported in custom recipes" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom recipe import banlist", + "output": "custom recipe import banlist config.toml: List of modules that cannot be imported in custom recipes" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "custom recipe import banlist config.toml: List of modules that cannot be imported in custom recipes" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting custom_recipe_import_banlist", + "output": "custom recipe import banlist config.toml: List of modules that cannot be imported in custom recipes" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting custom_recipe_import_banlist", + "output": "custom recipe import banlist config.toml: List of modules that cannot be imported in custom recipes" + }, + { + "prompt_type": "plain", + "instruction": ": What does custom_recipe_method_call_allowlist do? : custom recipe method call allowlist config.toml: Regex pattern list of calls which are allowed in custom recipes. Empty list means everything (except for banlist) is allowed. E.g. 
if only `os.path.*` is in allowlist, custom recipe can only call methods from `os.path` module and the built in ones " + }, + { + "prompt_type": "plain", + "instruction": ": Explain custom_recipe_method_call_allowlist. : custom recipe method call allowlist config.toml: Regex pattern list of calls which are allowed in custom recipes. Empty list means everything (except for banlist) is allowed. E.g. if only `os.path.*` is in allowlist, custom recipe can only call methods from `os.path` module and the built in ones " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipe_method_call_allowlist", + "output": "custom recipe method call allowlist config.toml: Regex pattern list of calls which are allowed in custom recipes. Empty list means everything (except for banlist) is allowed. E.g. if only `os.path.*` is in allowlist, custom recipe can only call methods from `os.path` module and the built in ones " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipe_method_call_allowlist", + "output": "custom recipe method call allowlist config.toml: Regex pattern list of calls which are allowed in custom recipes. Empty list means everything (except for banlist) is allowed. E.g. if only `os.path.*` is in allowlist, custom recipe can only call methods from `os.path` module and the built in ones " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom recipe method call allowlist", + "output": "custom recipe method call allowlist config.toml: Regex pattern list of calls which are allowed in custom recipes. Empty list means everything (except for banlist) is allowed. E.g. 
if only `os.path.*` is in allowlist, custom recipe can only call methods from `os.path` module and the built in ones " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "custom recipe method call allowlist config.toml: Regex pattern list of calls which are allowed in custom recipes. Empty list means everything (except for banlist) is allowed. E.g. if only `os.path.*` is in allowlist, custom recipe can only call methods from `os.path` module and the built in ones " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting custom_recipe_method_call_allowlist", + "output": "custom recipe method call allowlist config.toml: Regex pattern list of calls which are allowed in custom recipes. Empty list means everything (except for banlist) is allowed. E.g. if only `os.path.*` is in allowlist, custom recipe can only call methods from `os.path` module and the built in ones " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting custom_recipe_method_call_allowlist", + "output": "custom recipe method call allowlist config.toml: Regex pattern list of calls which are allowed in custom recipes. Empty list means everything (except for banlist) is allowed. E.g. if only `os.path.*` is in allowlist, custom recipe can only call methods from `os.path` module and the built in ones " + }, + { + "prompt_type": "plain", + "instruction": ": What does custom_recipe_method_call_banlist do? : custom recipe method call banlist config.toml: Regex pattern list of calls which need to be rejected in custom recipes. E.g. if `os.system` in banlist, custom recipe cannot call `os.system()`. If `socket.*` in banlist, recipe cannot call any method of socket module such as `socket.socket()` or any `socket.a.b.c()` " + }, + { + "prompt_type": "plain", + "instruction": ": Explain custom_recipe_method_call_banlist. 
: custom recipe method call banlist config.toml: Regex pattern list of calls which need to be rejected in custom recipes. E.g. if `os.system` in banlist, custom recipe cannot call `os.system()`. If `socket.*` in banlist, recipe cannot call any method of socket module such as `socket.socket()` or any `socket.a.b.c()` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipe_method_call_banlist", + "output": "custom recipe method call banlist config.toml: Regex pattern list of calls which need to be rejected in custom recipes. E.g. if `os.system` in banlist, custom recipe cannot call `os.system()`. If `socket.*` in banlist, recipe cannot call any method of socket module such as `socket.socket()` or any `socket.a.b.c()` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipe_method_call_banlist", + "output": "custom recipe method call banlist config.toml: Regex pattern list of calls which need to be rejected in custom recipes. E.g. if `os.system` in banlist, custom recipe cannot call `os.system()`. If `socket.*` in banlist, recipe cannot call any method of socket module such as `socket.socket()` or any `socket.a.b.c()` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom recipe method call banlist", + "output": "custom recipe method call banlist config.toml: Regex pattern list of calls which need to be rejected in custom recipes. E.g. if `os.system` in banlist, custom recipe cannot call `os.system()`. 
If `socket.*` in banlist, recipe cannot call any method of socket module such as `socket.socket()` or any `socket.a.b.c()` " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "custom recipe method call banlist config.toml: Regex pattern list of calls which need to be rejected in custom recipes. E.g. if `os.system` in banlist, custom recipe cannot call `os.system()`. If `socket.*` in banlist, recipe cannot call any method of socket module such as `socket.socket()` or any `socket.a.b.c()` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting custom_recipe_method_call_banlist", + "output": "custom recipe method call banlist config.toml: Regex pattern list of calls which need to be rejected in custom recipes. E.g. if `os.system` in banlist, custom recipe cannot call `os.system()`. If `socket.*` in banlist, recipe cannot call any method of socket module such as `socket.socket()` or any `socket.a.b.c()` " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting custom_recipe_method_call_banlist", + "output": "custom recipe method call banlist config.toml: Regex pattern list of calls which need to be rejected in custom recipes. E.g. if `os.system` in banlist, custom recipe cannot call `os.system()`. If `socket.*` in banlist, recipe cannot call any method of socket module such as `socket.socket()` or any `socket.a.b.c()` " + }, + { + "prompt_type": "plain", + "instruction": ": What does custom_recipe_dangerous_patterns do? : custom recipe dangerous patterns config.toml: List of regex patterns representing dangerous sequences/constructs which could be harmful to whole system and should be banned from code " + }, + { + "prompt_type": "plain", + "instruction": ": Explain custom_recipe_dangerous_patterns. 
: custom recipe dangerous patterns config.toml: List of regex patterns representing dangerous sequences/constructs which could be harmful to whole system and should be banned from code " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipe_dangerous_patterns", + "output": "custom recipe dangerous patterns config.toml: List of regex patterns representing dangerous sequences/constructs which could be harmful to whole system and should be banned from code " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom_recipe_dangerous_patterns", + "output": "custom recipe dangerous patterns config.toml: List of regex patterns representing dangerous sequences/constructs which could be harmful to whole system and should be banned from code " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "custom recipe dangerous patterns", + "output": "custom recipe dangerous patterns config.toml: List of regex patterns representing dangerous sequences/constructs which could be harmful to whole system and should be banned from code " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "custom recipe dangerous patterns config.toml: List of regex patterns representing dangerous sequences/constructs which could be harmful to whole system and should be banned from code " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting custom_recipe_dangerous_patterns", + "output": "custom recipe dangerous patterns config.toml: List of regex patterns representing dangerous sequences/constructs which could be harmful to whole system and should be banned from code " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a 
detailed explanation of the expert setting custom_recipe_dangerous_patterns", + "output": "custom recipe dangerous patterns config.toml: List of regex patterns representing dangerous sequences/constructs which could be harmful to whole system and should be banned from code " + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_concurrent_sessions do? : allow concurrent sessions config.toml: If enabled, user can log in from 2 browsers (scripts) at the same time" + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_concurrent_sessions. : allow concurrent sessions config.toml: If enabled, user can log in from 2 browsers (scripts) at the same time" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable concurrent session for same user: . : Set the allow concurrent sessions config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_concurrent_sessions", + "output": "allow concurrent sessions config.toml: If enabled, user can log in from 2 browsers (scripts) at the same time" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_concurrent_sessions", + "output": "allow concurrent sessions config.toml: Enable concurrent session for same user: If enabled, user can log in from 2 browsers (scripts) at the same time" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow concurrent sessions", + "output": "allow concurrent sessions config.toml: Enable concurrent session for same user: If enabled, user can log in from 2 browsers (scripts) at the same time" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable concurrent session for same user: ", + "output": "allow concurrent sessions config.toml: Enable 
concurrent session for same user: If enabled, user can log in from 2 browsers (scripts) at the same time" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_concurrent_sessions", + "output": "allow concurrent sessions config.toml: If enabled, user can log in from 2 browsers (scripts) at the same time" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allow_concurrent_sessions", + "output": "allow concurrent sessions config.toml: Enable concurrent session for same user: If enabled, user can log in from 2 browsers (scripts) at the same time" + }, + { + "prompt_type": "plain", + "instruction": ": What does extra_http_headers do? : extra http headers config.toml: Extra HTTP headers." + }, + { + "prompt_type": "plain", + "instruction": ": Explain extra_http_headers. : extra http headers config.toml: Extra HTTP headers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "extra_http_headers", + "output": "extra http headers config.toml: Extra HTTP headers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "extra_http_headers", + "output": "extra http headers config.toml: Extra HTTP headers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "extra http headers", + "output": "extra http headers config.toml: Extra HTTP headers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "extra http headers config.toml: Extra HTTP headers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting extra_http_headers", + "output": "extra http headers config.toml: Extra HTTP headers." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting extra_http_headers", + "output": "extra http headers config.toml: Extra HTTP headers." + }, + { + "prompt_type": "plain", + "instruction": ": What does http_cookie_attributes do? : http cookie attributes config.toml: By default DriverlessAI issues cookies with HTTPOnly and Secure attributes (morsels) enabled. In addition to that, SameSite attribute is set to 'Lax', as it's a default in modern browsers. The config overrides the default key/value (morsels)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain http_cookie_attributes. : http cookie attributes config.toml: By default DriverlessAI issues cookies with HTTPOnly and Secure attributes (morsels) enabled. In addition to that, SameSite attribute is set to 'Lax', as it's a default in modern browsers. The config overrides the default key/value (morsels)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Extra HTTP cookie flags: . : Set the http cookie attributes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "http_cookie_attributes", + "output": "http cookie attributes config.toml: By default DriverlessAI issues cookies with HTTPOnly and Secure attributes (morsels) enabled. In addition to that, SameSite attribute is set to 'Lax', as it's a default in modern browsers. The config overrides the default key/value (morsels)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "http_cookie_attributes", + "output": "http cookie attributes config.toml: Extra HTTP cookie flags: By default DriverlessAI issues cookies with HTTPOnly and Secure attributes (morsels) enabled. In addition to that, SameSite attribute is set to 'Lax', as it's a default in modern browsers. The config overrides the default key/value (morsels)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "http cookie attributes", + "output": "http cookie attributes config.toml: Extra HTTP cookie flags: By default DriverlessAI issues cookies with HTTPOnly and Secure attributes (morsels) enabled. In addition to that, SameSite attribute is set to 'Lax', as it's a default in modern browsers. The config overrides the default key/value (morsels)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Extra HTTP cookie flags: ", + "output": "http cookie attributes config.toml: Extra HTTP cookie flags: By default DriverlessAI issues cookies with HTTPOnly and Secure attributes (morsels) enabled. In addition to that, SameSite attribute is set to 'Lax', as it's a default in modern browsers. The config overrides the default key/value (morsels)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting http_cookie_attributes", + "output": "http cookie attributes config.toml: By default DriverlessAI issues cookies with HTTPOnly and Secure attributes (morsels) enabled. In addition to that, SameSite attribute is set to 'Lax', as it's a default in modern browsers. The config overrides the default key/value (morsels)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting http_cookie_attributes", + "output": "http cookie attributes config.toml: Extra HTTP cookie flags: By default DriverlessAI issues cookies with HTTPOnly and Secure attributes (morsels) enabled. In addition to that, SameSite attribute is set to 'Lax', as it's a default in modern browsers. The config overrides the default key/value (morsels)." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_imputation do? 
: enable imputation config.toml: Enable column imputation" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_imputation. : enable imputation config.toml: Enable column imputation" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enabling imputation adds new picker to EXPT setup GUI and triggers imputation functionality in Transformers : . : Set the enable imputation config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_imputation", + "output": "enable imputation config.toml: Enable column imputation" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_imputation", + "output": "enable imputation config.toml: Enabling imputation adds new picker to EXPT setup GUI and triggers imputation functionality in Transformers : Enable column imputation" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable imputation", + "output": "enable imputation config.toml: Enabling imputation adds new picker to EXPT setup GUI and triggers imputation functionality in Transformers : Enable column imputation" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "\n Enabling imputation adds new picker to EXPT setup GUI\n and triggers imputation functionality in Transformers\n : ", + "output": "enable imputation config.toml: Enabling imputation adds new picker to EXPT setup GUI and triggers imputation functionality in Transformers : Enable column imputation" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_imputation", + "output": "enable imputation config.toml: Enable column imputation" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed 
explanation of the expert setting enable_imputation", + "output": "enable imputation config.toml: Enabling imputation adds new picker to EXPT setup GUI and triggers imputation functionality in Transformers : Enable column imputation" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_advanced_features_experiment do? : enable advanced features experiment config.toml: Adds advanced settings panel to experiment setup, which allows creating custom features and more. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_advanced_features_experiment. : enable advanced features experiment config.toml: Adds advanced settings panel to experiment setup, which allows creating custom features and more. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Reveal advanced settings panel in experiment setup: . : Set the enable advanced features experiment config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_advanced_features_experiment", + "output": "enable advanced features experiment config.toml: Adds advanced settings panel to experiment setup, which allows creating custom features and more. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_advanced_features_experiment", + "output": "enable advanced features experiment config.toml: Reveal advanced settings panel in experiment setup: Adds advanced settings panel to experiment setup, which allows creating custom features and more. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable advanced features experiment", + "output": "enable advanced features experiment config.toml: Reveal advanced settings panel in experiment setup: Adds advanced settings panel to experiment setup, which allows creating custom features and more. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Reveal advanced settings panel in experiment setup: ", + "output": "enable advanced features experiment config.toml: Reveal advanced settings panel in experiment setup: Adds advanced settings panel to experiment setup, which allows creating custom features and more. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_advanced_features_experiment", + "output": "enable advanced features experiment config.toml: Adds advanced settings panel to experiment setup, which allows creating custom features and more. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_advanced_features_experiment", + "output": "enable advanced features experiment config.toml: Reveal advanced settings panel in experiment setup: Adds advanced settings panel to experiment setup, which allows creating custom features and more. " + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_storage_address do? : h2o storage address config.toml: Address of the H2O Storage endpoint. Keep empty to use the local storage only." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_storage_address. : h2o storage address config.toml: Address of the H2O Storage endpoint. Keep empty to use the local storage only." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_address", + "output": "h2o storage address config.toml: Address of the H2O Storage endpoint. Keep empty to use the local storage only." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_address", + "output": "h2o storage address config.toml: Address of the H2O Storage endpoint. Keep empty to use the local storage only." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o storage address", + "output": "h2o storage address config.toml: Address of the H2O Storage endpoint. Keep empty to use the local storage only." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o storage address config.toml: Address of the H2O Storage endpoint. Keep empty to use the local storage only." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_storage_address", + "output": "h2o storage address config.toml: Address of the H2O Storage endpoint. Keep empty to use the local storage only." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_storage_address", + "output": "h2o storage address config.toml: Address of the H2O Storage endpoint. Keep empty to use the local storage only." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_storage_projects_enabled do? : h2o storage projects enabled config.toml: Whether to use remote projects stored in H2O Storage instead of local projects." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_storage_projects_enabled. 
: h2o storage projects enabled config.toml: Whether to use remote projects stored in H2O Storage instead of local projects." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_projects_enabled", + "output": "h2o storage projects enabled config.toml: Whether to use remote projects stored in H2O Storage instead of local projects." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_projects_enabled", + "output": "h2o storage projects enabled config.toml: Whether to use remote projects stored in H2O Storage instead of local projects." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o storage projects enabled", + "output": "h2o storage projects enabled config.toml: Whether to use remote projects stored in H2O Storage instead of local projects." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o storage projects enabled config.toml: Whether to use remote projects stored in H2O Storage instead of local projects." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_storage_projects_enabled", + "output": "h2o storage projects enabled config.toml: Whether to use remote projects stored in H2O Storage instead of local projects." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_storage_projects_enabled", + "output": "h2o storage projects enabled config.toml: Whether to use remote projects stored in H2O Storage instead of local projects." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_storage_tls_enabled do? 
: h2o storage tls enabled config.toml: Whether the channel to the storage should be encrypted." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_storage_tls_enabled. : h2o storage tls enabled config.toml: Whether the channel to the storage should be encrypted." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_tls_enabled", + "output": "h2o storage tls enabled config.toml: Whether the channel to the storage should be encrypted." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_tls_enabled", + "output": "h2o storage tls enabled config.toml: Whether the channel to the storage should be encrypted." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o storage tls enabled", + "output": "h2o storage tls enabled config.toml: Whether the channel to the storage should be encrypted." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o storage tls enabled config.toml: Whether the channel to the storage should be encrypted." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_storage_tls_enabled", + "output": "h2o storage tls enabled config.toml: Whether the channel to the storage should be encrypted." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_storage_tls_enabled", + "output": "h2o storage tls enabled config.toml: Whether the channel to the storage should be encrypted." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_storage_tls_ca_path do? 
: h2o storage tls ca path config.toml: Path to the certification authority certificate that H2O Storage server identity will be checked against." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_storage_tls_ca_path. : h2o storage tls ca path config.toml: Path to the certification authority certificate that H2O Storage server identity will be checked against." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_tls_ca_path", + "output": "h2o storage tls ca path config.toml: Path to the certification authority certificate that H2O Storage server identity will be checked against." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_tls_ca_path", + "output": "h2o storage tls ca path config.toml: Path to the certification authority certificate that H2O Storage server identity will be checked against." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o storage tls ca path", + "output": "h2o storage tls ca path config.toml: Path to the certification authority certificate that H2O Storage server identity will be checked against." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o storage tls ca path config.toml: Path to the certification authority certificate that H2O Storage server identity will be checked against." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_storage_tls_ca_path", + "output": "h2o storage tls ca path config.toml: Path to the certification authority certificate that H2O Storage server identity will be checked against." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_storage_tls_ca_path", + "output": "h2o storage tls ca path config.toml: Path to the certification authority certificate that H2O Storage server identity will be checked against." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_storage_tls_cert_path do? : h2o storage tls cert path config.toml: Path to the client certificate to authenticate with H2O Storage server" + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_storage_tls_cert_path. : h2o storage tls cert path config.toml: Path to the client certificate to authenticate with H2O Storage server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_tls_cert_path", + "output": "h2o storage tls cert path config.toml: Path to the client certificate to authenticate with H2O Storage server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_tls_cert_path", + "output": "h2o storage tls cert path config.toml: Path to the client certificate to authenticate with H2O Storage server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o storage tls cert path", + "output": "h2o storage tls cert path config.toml: Path to the client certificate to authenticate with H2O Storage server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o storage tls cert path config.toml: Path to the client certificate to authenticate with H2O Storage server" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_storage_tls_cert_path", + "output": "h2o storage tls cert path config.toml: Path to the client 
certificate to authenticate with H2O Storage server" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_storage_tls_cert_path", + "output": "h2o storage tls cert path config.toml: Path to the client certificate to authenticate with H2O Storage server" + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_storage_tls_key_path do? : h2o storage tls key path config.toml: Path to the client key to authenticate with H2O Storage server" + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_storage_tls_key_path. : h2o storage tls key path config.toml: Path to the client key to authenticate with H2O Storage server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_tls_key_path", + "output": "h2o storage tls key path config.toml: Path to the client key to authenticate with H2O Storage server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_tls_key_path", + "output": "h2o storage tls key path config.toml: Path to the client key to authenticate with H2O Storage server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o storage tls key path", + "output": "h2o storage tls key path config.toml: Path to the client key to authenticate with H2O Storage server" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o storage tls key path config.toml: Path to the client key to authenticate with H2O Storage server" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_storage_tls_key_path", + "output": "h2o storage tls key path config.toml: Path to the client key to authenticate with H2O Storage 
server" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_storage_tls_key_path", + "output": "h2o storage tls key path config.toml: Path to the client key to authenticate with H2O Storage server" + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_storage_internal_default_project_id do? : h2o storage internal default project id config.toml: UUID of a Storage project to use instead of the remote HOME folder." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_storage_internal_default_project_id. : h2o storage internal default project id config.toml: UUID of a Storage project to use instead of the remote HOME folder." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_internal_default_project_id", + "output": "h2o storage internal default project id config.toml: UUID of a Storage project to use instead of the remote HOME folder." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_internal_default_project_id", + "output": "h2o storage internal default project id config.toml: UUID of a Storage project to use instead of the remote HOME folder." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o storage internal default project id", + "output": "h2o storage internal default project id config.toml: UUID of a Storage project to use instead of the remote HOME folder." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o storage internal default project id config.toml: UUID of a Storage project to use instead of the remote HOME folder." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_storage_internal_default_project_id", + "output": "h2o storage internal default project id config.toml: UUID of a Storage project to use instead of the remote HOME folder." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_storage_internal_default_project_id", + "output": "h2o storage internal default project id config.toml: UUID of a Storage project to use instead of the remote HOME folder." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_storage_rpc_deadline_seconds do? : h2o storage rpc deadline seconds config.toml: Deadline for RPC calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_storage_rpc_deadline_seconds. : h2o storage rpc deadline seconds config.toml: Deadline for RPC calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_rpc_deadline_seconds", + "output": "h2o storage rpc deadline seconds config.toml: Deadline for RPC calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_rpc_deadline_seconds", + "output": "h2o storage rpc deadline seconds config.toml: Deadline for RPC calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o storage rpc deadline seconds", + "output": "h2o storage rpc deadline seconds config.toml: Deadline for RPC calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o storage rpc deadline seconds config.toml: Deadline for RPC calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_storage_rpc_deadline_seconds", + "output": "h2o storage rpc deadline seconds config.toml: Deadline for RPC calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_storage_rpc_deadline_seconds", + "output": "h2o storage rpc deadline seconds config.toml: Deadline for RPC calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_storage_rpc_bytestream_deadline_seconds do? : h2o storage rpc bytestream deadline seconds config.toml: Deadline for RPC bytestrteam calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it. This value is used for uploading and downloading artifacts." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_storage_rpc_bytestream_deadline_seconds. 
: h2o storage rpc bytestream deadline seconds config.toml: Deadline for RPC bytestrteam calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it. This value is used for uploading and downloading artifacts." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_rpc_bytestream_deadline_seconds", + "output": "h2o storage rpc bytestream deadline seconds config.toml: Deadline for RPC bytestrteam calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it. This value is used for uploading and downloading artifacts." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_rpc_bytestream_deadline_seconds", + "output": "h2o storage rpc bytestream deadline seconds config.toml: Deadline for RPC bytestrteam calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it. This value is used for uploading and downloading artifacts." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o storage rpc bytestream deadline seconds", + "output": "h2o storage rpc bytestream deadline seconds config.toml: Deadline for RPC bytestrteam calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it. This value is used for uploading and downloading artifacts." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o storage rpc bytestream deadline seconds config.toml: Deadline for RPC bytestrteam calls with H2O Storage in seconds. 
Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it. This value is used for uploading and downloading artifacts." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_storage_rpc_bytestream_deadline_seconds", + "output": "h2o storage rpc bytestream deadline seconds config.toml: Deadline for RPC bytestrteam calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it. This value is used for uploading and downloading artifacts." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_storage_rpc_bytestream_deadline_seconds", + "output": "h2o storage rpc bytestream deadline seconds config.toml: Deadline for RPC bytestrteam calls with H2O Storage in seconds. Sets maximum number of seconds that Driverless waits for RPC call to complete before it cancels it. This value is used for uploading and downloading artifacts." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_storage_oauth2_scopes do? : h2o storage oauth2 scopes config.toml: Storage client manages it's own access tokens derived from the refresh token received on the user login. When this option is set access token with the scopes defined here is requested. (space separated list)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_storage_oauth2_scopes. : h2o storage oauth2 scopes config.toml: Storage client manages it's own access tokens derived from the refresh token received on the user login. When this option is set access token with the scopes defined here is requested. 
(space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_oauth2_scopes", + "output": "h2o storage oauth2 scopes config.toml: Storage client manages it's own access tokens derived from the refresh token received on the user login. When this option is set access token with the scopes defined here is requested. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_oauth2_scopes", + "output": "h2o storage oauth2 scopes config.toml: Storage client manages it's own access tokens derived from the refresh token received on the user login. When this option is set access token with the scopes defined here is requested. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o storage oauth2 scopes", + "output": "h2o storage oauth2 scopes config.toml: Storage client manages it's own access tokens derived from the refresh token received on the user login. When this option is set access token with the scopes defined here is requested. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o storage oauth2 scopes config.toml: Storage client manages it's own access tokens derived from the refresh token received on the user login. When this option is set access token with the scopes defined here is requested. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_storage_oauth2_scopes", + "output": "h2o storage oauth2 scopes config.toml: Storage client manages it's own access tokens derived from the refresh token received on the user login. 
When this option is set access token with the scopes defined here is requested. (space separated list)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_storage_oauth2_scopes", + "output": "h2o storage oauth2 scopes config.toml: Storage client manages it's own access tokens derived from the refresh token received on the user login. When this option is set access token with the scopes defined here is requested. (space separated list)" + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_storage_message_size_limit do? : h2o storage message size limit config.toml: Maximum size of message size of RPC request in bytes. Requests larger than this limit will fail." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_storage_message_size_limit. : h2o storage message size limit config.toml: Maximum size of message size of RPC request in bytes. Requests larger than this limit will fail." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_message_size_limit", + "output": "h2o storage message size limit config.toml: Maximum size of message size of RPC request in bytes. Requests larger than this limit will fail." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_storage_message_size_limit", + "output": "h2o storage message size limit config.toml: Maximum size of message size of RPC request in bytes. Requests larger than this limit will fail." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o storage message size limit", + "output": "h2o storage message size limit config.toml: Maximum size of message size of RPC request in bytes. Requests larger than this limit will fail." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o storage message size limit config.toml: Maximum size of message size of RPC request in bytes. Requests larger than this limit will fail." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_storage_message_size_limit", + "output": "h2o storage message size limit config.toml: Maximum size of message size of RPC request in bytes. Requests larger than this limit will fail." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_storage_message_size_limit", + "output": "h2o storage message size limit config.toml: Maximum size of message size of RPC request in bytes. Requests larger than this limit will fail." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_mlops_ui_url do? : h2o mlops ui url config.toml: If the `h2o_mlops_ui_url` is provided alongside the `enable_storage`, DAI is able to redirect user to the MLOps app upon clicking the Deploy button." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_mlops_ui_url. : h2o mlops ui url config.toml: If the `h2o_mlops_ui_url` is provided alongside the `enable_storage`, DAI is able to redirect user to the MLOps app upon clicking the Deploy button." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: MLOps UI URL address: . : Set the h2o mlops ui url config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_mlops_ui_url", + "output": "h2o mlops ui url config.toml: If the `h2o_mlops_ui_url` is provided alongside the `enable_storage`, DAI is able to redirect user to the MLOps app upon clicking the Deploy button." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_mlops_ui_url", + "output": "h2o mlops ui url config.toml: MLOps UI URL address: If the `h2o_mlops_ui_url` is provided alongside the `enable_storage`, DAI is able to redirect user to the MLOps app upon clicking the Deploy button." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o mlops ui url", + "output": "h2o mlops ui url config.toml: MLOps UI URL address: If the `h2o_mlops_ui_url` is provided alongside the `enable_storage`, DAI is able to redirect user to the MLOps app upon clicking the Deploy button." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "MLOps UI URL address: ", + "output": "h2o mlops ui url config.toml: MLOps UI URL address: If the `h2o_mlops_ui_url` is provided alongside the `enable_storage`, DAI is able to redirect user to the MLOps app upon clicking the Deploy button." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_mlops_ui_url", + "output": "h2o mlops ui url config.toml: If the `h2o_mlops_ui_url` is provided alongside the `enable_storage`, DAI is able to redirect user to the MLOps app upon clicking the Deploy button." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_mlops_ui_url", + "output": "h2o mlops ui url config.toml: MLOps UI URL address: If the `h2o_mlops_ui_url` is provided alongside the `enable_storage`, DAI is able to redirect user to the MLOps app upon clicking the Deploy button." + }, + { + "prompt_type": "plain", + "instruction": ": What does keystore_file do? : keystore file config.toml: Keystore file that contains secure config.toml items like passwords, secret keys etc. Keystore is managed by h2oai.keystore tool." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain keystore_file. : keystore file config.toml: Keystore file that contains secure config.toml items like passwords, secret keys etc. Keystore is managed by h2oai.keystore tool." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "keystore_file", + "output": "keystore file config.toml: Keystore file that contains secure config.toml items like passwords, secret keys etc. Keystore is managed by h2oai.keystore tool." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "keystore_file", + "output": "keystore file config.toml: Keystore file that contains secure config.toml items like passwords, secret keys etc. Keystore is managed by h2oai.keystore tool." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "keystore file", + "output": "keystore file config.toml: Keystore file that contains secure config.toml items like passwords, secret keys etc. Keystore is managed by h2oai.keystore tool." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "keystore file config.toml: Keystore file that contains secure config.toml items like passwords, secret keys etc. Keystore is managed by h2oai.keystore tool." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting keystore_file", + "output": "keystore file config.toml: Keystore file that contains secure config.toml items like passwords, secret keys etc. Keystore is managed by h2oai.keystore tool." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting keystore_file", + "output": "keystore file config.toml: Keystore file that contains secure config.toml items like passwords, secret keys etc. Keystore is managed by h2oai.keystore tool." + }, + { + "prompt_type": "plain", + "instruction": ": What does log_level do? : log level config.toml: Verbosity of logging 0: quiet (CRITICAL, ERROR, WARNING) 1: default (CRITICAL, ERROR, WARNING, INFO, DATA) 2: verbose (CRITICAL, ERROR, WARNING, INFO, DATA, DEBUG) Affects server and all experiments" + }, + { + "prompt_type": "plain", + "instruction": ": Explain log_level. : log level config.toml: Verbosity of logging 0: quiet (CRITICAL, ERROR, WARNING) 1: default (CRITICAL, ERROR, WARNING, INFO, DATA) 2: verbose (CRITICAL, ERROR, WARNING, INFO, DATA, DEBUG) Affects server and all experiments" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "log_level", + "output": "log level config.toml: Verbosity of logging 0: quiet (CRITICAL, ERROR, WARNING) 1: default (CRITICAL, ERROR, WARNING, INFO, DATA) 2: verbose (CRITICAL, ERROR, WARNING, INFO, DATA, DEBUG) Affects server and all experiments" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "log_level", + "output": "log level config.toml: Verbosity of logging 0: quiet (CRITICAL, ERROR, WARNING) 1: default (CRITICAL, ERROR, WARNING, INFO, DATA) 2: verbose (CRITICAL, ERROR, WARNING, INFO, DATA, DEBUG) Affects server and all experiments" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "log level", + "output": "log level config.toml: Verbosity of logging 0: quiet (CRITICAL, ERROR, WARNING) 1: default (CRITICAL, ERROR, WARNING, INFO, DATA) 2: verbose (CRITICAL, ERROR, WARNING, INFO, DATA, DEBUG) Affects 
server and all experiments" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "log level config.toml: Verbosity of logging 0: quiet (CRITICAL, ERROR, WARNING) 1: default (CRITICAL, ERROR, WARNING, INFO, DATA) 2: verbose (CRITICAL, ERROR, WARNING, INFO, DATA, DEBUG) Affects server and all experiments" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting log_level", + "output": "log level config.toml: Verbosity of logging 0: quiet (CRITICAL, ERROR, WARNING) 1: default (CRITICAL, ERROR, WARNING, INFO, DATA) 2: verbose (CRITICAL, ERROR, WARNING, INFO, DATA, DEBUG) Affects server and all experiments" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting log_level", + "output": "log level config.toml: Verbosity of logging 0: quiet (CRITICAL, ERROR, WARNING) 1: default (CRITICAL, ERROR, WARNING, INFO, DATA) 2: verbose (CRITICAL, ERROR, WARNING, INFO, DATA, DEBUG) Affects server and all experiments" + }, + { + "prompt_type": "plain", + "instruction": ": What does collect_server_logs_in_experiment_logs do? : collect server logs in experiment logs config.toml: Whether to collect relevant server logs (h2oai_server.log, dai.log from systemctl or docker, and h2o log) Useful for when sending logs to H2O.ai" + }, + { + "prompt_type": "plain", + "instruction": ": Explain collect_server_logs_in_experiment_logs. 
: collect server logs in experiment logs config.toml: Whether to collect relevant server logs (h2oai_server.log, dai.log from systemctl or docker, and h2o log) Useful for when sending logs to H2O.ai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "collect_server_logs_in_experiment_logs", + "output": "collect server logs in experiment logs config.toml: Whether to collect relevant server logs (h2oai_server.log, dai.log from systemctl or docker, and h2o log) Useful for when sending logs to H2O.ai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "collect_server_logs_in_experiment_logs", + "output": "collect server logs in experiment logs config.toml: Whether to collect relevant server logs (h2oai_server.log, dai.log from systemctl or docker, and h2o log) Useful for when sending logs to H2O.ai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "collect server logs in experiment logs", + "output": "collect server logs in experiment logs config.toml: Whether to collect relevant server logs (h2oai_server.log, dai.log from systemctl or docker, and h2o log) Useful for when sending logs to H2O.ai" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "collect server logs in experiment logs config.toml: Whether to collect relevant server logs (h2oai_server.log, dai.log from systemctl or docker, and h2o log) Useful for when sending logs to H2O.ai" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting collect_server_logs_in_experiment_logs", + "output": "collect server logs in experiment logs config.toml: Whether to collect relevant server logs (h2oai_server.log, dai.log from systemctl or docker, and h2o log) Useful 
for when sending logs to H2O.ai" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting collect_server_logs_in_experiment_logs", + "output": "collect server logs in experiment logs config.toml: Whether to collect relevant server logs (h2oai_server.log, dai.log from systemctl or docker, and h2o log) Useful for when sending logs to H2O.ai" + }, + { + "prompt_type": "plain", + "instruction": ": What does migrate_all_entities_to_user do? : migrate all entities to user config.toml: When set, will migrate all user entities to the defined user upon startup, this is mostly useful during instance migration via H2O's AIEM/Steam." + }, + { + "prompt_type": "plain", + "instruction": ": Explain migrate_all_entities_to_user. : migrate all entities to user config.toml: When set, will migrate all user entities to the defined user upon startup, this is mostly useful during instance migration via H2O's AIEM/Steam." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "migrate_all_entities_to_user", + "output": "migrate all entities to user config.toml: When set, will migrate all user entities to the defined user upon startup, this is mostly useful during instance migration via H2O's AIEM/Steam." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "migrate_all_entities_to_user", + "output": "migrate all entities to user config.toml: When set, will migrate all user entities to the defined user upon startup, this is mostly useful during instance migration via H2O's AIEM/Steam." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "migrate all entities to user", + "output": "migrate all entities to user config.toml: When set, will migrate all user entities to the defined user upon startup, this is mostly useful during instance migration via H2O's AIEM/Steam." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "migrate all entities to user config.toml: When set, will migrate all user entities to the defined user upon startup, this is mostly useful during instance migration via H2O's AIEM/Steam." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting migrate_all_entities_to_user", + "output": "migrate all entities to user config.toml: When set, will migrate all user entities to the defined user upon startup, this is mostly useful during instance migration via H2O's AIEM/Steam." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting migrate_all_entities_to_user", + "output": "migrate all entities to user config.toml: When set, will migrate all user entities to the defined user upon startup, this is mostly useful during instance migration via H2O's AIEM/Steam." + }, + { + "prompt_type": "plain", + "instruction": ": What does per_user_directories do? : per user directories config.toml: Whether to have all user content isolated into a directory for each user. If set to False, all users content is common to single directory, recipes are shared, and brain folder for restart/refit is shared. If set to True, each user has separate folder for all user tasks, recipes are isolated to each user, and brain folder for restart/refit is only for the specific user. 
Migration from False to True or back to False is allowed for all experiment content accessible by GUI or python client, all recipes, and starting experiment with same settings, restart, or refit. However, if switch to per-user mode, the common brain folder is no longer used. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain per_user_directories. : per user directories config.toml: Whether to have all user content isolated into a directory for each user. If set to False, all users content is common to single directory, recipes are shared, and brain folder for restart/refit is shared. If set to True, each user has separate folder for all user tasks, recipes are isolated to each user, and brain folder for restart/refit is only for the specific user. Migration from False to True or back to False is allowed for all experiment content accessible by GUI or python client, all recipes, and starting experiment with same settings, restart, or refit. However, if switch to per-user mode, the common brain folder is no longer used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "per_user_directories", + "output": "per user directories config.toml: Whether to have all user content isolated into a directory for each user. If set to False, all users content is common to single directory, recipes are shared, and brain folder for restart/refit is shared. If set to True, each user has separate folder for all user tasks, recipes are isolated to each user, and brain folder for restart/refit is only for the specific user. Migration from False to True or back to False is allowed for all experiment content accessible by GUI or python client, all recipes, and starting experiment with same settings, restart, or refit. However, if switch to per-user mode, the common brain folder is no longer used. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "per_user_directories", + "output": "per user directories config.toml: Whether to have all user content isolated into a directory for each user. If set to False, all users content is common to single directory, recipes are shared, and brain folder for restart/refit is shared. If set to True, each user has separate folder for all user tasks, recipes are isolated to each user, and brain folder for restart/refit is only for the specific user. Migration from False to True or back to False is allowed for all experiment content accessible by GUI or python client, all recipes, and starting experiment with same settings, restart, or refit. However, if switch to per-user mode, the common brain folder is no longer used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "per user directories", + "output": "per user directories config.toml: Whether to have all user content isolated into a directory for each user. If set to False, all users content is common to single directory, recipes are shared, and brain folder for restart/refit is shared. If set to True, each user has separate folder for all user tasks, recipes are isolated to each user, and brain folder for restart/refit is only for the specific user. Migration from False to True or back to False is allowed for all experiment content accessible by GUI or python client, all recipes, and starting experiment with same settings, restart, or refit. However, if switch to per-user mode, the common brain folder is no longer used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "per user directories config.toml: Whether to have all user content isolated into a directory for each user. 
If set to False, all users content is common to single directory, recipes are shared, and brain folder for restart/refit is shared. If set to True, each user has separate folder for all user tasks, recipes are isolated to each user, and brain folder for restart/refit is only for the specific user. Migration from False to True or back to False is allowed for all experiment content accessible by GUI or python client, all recipes, and starting experiment with same settings, restart, or refit. However, if switch to per-user mode, the common brain folder is no longer used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting per_user_directories", + "output": "per user directories config.toml: Whether to have all user content isolated into a directory for each user. If set to False, all users content is common to single directory, recipes are shared, and brain folder for restart/refit is shared. If set to True, each user has separate folder for all user tasks, recipes are isolated to each user, and brain folder for restart/refit is only for the specific user. Migration from False to True or back to False is allowed for all experiment content accessible by GUI or python client, all recipes, and starting experiment with same settings, restart, or refit. However, if switch to per-user mode, the common brain folder is no longer used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting per_user_directories", + "output": "per user directories config.toml: Whether to have all user content isolated into a directory for each user. If set to False, all users content is common to single directory, recipes are shared, and brain folder for restart/refit is shared. If set to True, each user has separate folder for all user tasks, recipes are isolated to each user, and brain folder for restart/refit is only for the specific user. 
Migration from False to True or back to False is allowed for all experiment content accessible by GUI or python client, all recipes, and starting experiment with same settings, restart, or refit. However, if switch to per-user mode, the common brain folder is no longer used. " + }, + { + "prompt_type": "plain", + "instruction": ": What does data_import_ignore_file_names do? : data import ignore file names config.toml: List of file names to ignore during dataset import. Any files with names listed above will be skipped when DAI creates a dataset. Example, directory contains 3 files: [data_1.csv, data_2.csv, _SUCCESS] DAI will only attempt to create a dataset using files data_1.csv and data_2.csv, and _SUCCESS file will be ignored. Default is to ignore _SUCCESS files which are commonly created in exporting data from Hadoop " + }, + { + "prompt_type": "plain", + "instruction": ": Explain data_import_ignore_file_names. : data import ignore file names config.toml: List of file names to ignore during dataset import. Any files with names listed above will be skipped when DAI creates a dataset. Example, directory contains 3 files: [data_1.csv, data_2.csv, _SUCCESS] DAI will only attempt to create a dataset using files data_1.csv and data_2.csv, and _SUCCESS file will be ignored. Default is to ignore _SUCCESS files which are commonly created in exporting data from Hadoop " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_import_ignore_file_names", + "output": "data import ignore file names config.toml: List of file names to ignore during dataset import. Any files with names listed above will be skipped when DAI creates a dataset. Example, directory contains 3 files: [data_1.csv, data_2.csv, _SUCCESS] DAI will only attempt to create a dataset using files data_1.csv and data_2.csv, and _SUCCESS file will be ignored. 
Default is to ignore _SUCCESS files which are commonly created in exporting data from Hadoop " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_import_ignore_file_names", + "output": "data import ignore file names config.toml: List of file names to ignore during dataset import. Any files with names listed above will be skipped when DAI creates a dataset. Example, directory contains 3 files: [data_1.csv, data_2.csv, _SUCCESS] DAI will only attempt to create a dataset using files data_1.csv and data_2.csv, and _SUCCESS file will be ignored. Default is to ignore _SUCCESS files which are commonly created in exporting data from Hadoop " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data import ignore file names", + "output": "data import ignore file names config.toml: List of file names to ignore during dataset import. Any files with names listed above will be skipped when DAI creates a dataset. Example, directory contains 3 files: [data_1.csv, data_2.csv, _SUCCESS] DAI will only attempt to create a dataset using files data_1.csv and data_2.csv, and _SUCCESS file will be ignored. Default is to ignore _SUCCESS files which are commonly created in exporting data from Hadoop " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "data import ignore file names config.toml: List of file names to ignore during dataset import. Any files with names listed above will be skipped when DAI creates a dataset. Example, directory contains 3 files: [data_1.csv, data_2.csv, _SUCCESS] DAI will only attempt to create a dataset using files data_1.csv and data_2.csv, and _SUCCESS file will be ignored. 
Default is to ignore _SUCCESS files which are commonly created in exporting data from Hadoop " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting data_import_ignore_file_names", + "output": "data import ignore file names config.toml: List of file names to ignore during dataset import. Any files with names listed above will be skipped when DAI creates a dataset. Example, directory contains 3 files: [data_1.csv, data_2.csv, _SUCCESS] DAI will only attempt to create a dataset using files data_1.csv and data_2.csv, and _SUCCESS file will be ignored. Default is to ignore _SUCCESS files which are commonly created in exporting data from Hadoop " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting data_import_ignore_file_names", + "output": "data import ignore file names config.toml: List of file names to ignore during dataset import. Any files with names listed above will be skipped when DAI creates a dataset. Example, directory contains 3 files: [data_1.csv, data_2.csv, _SUCCESS] DAI will only attempt to create a dataset using files data_1.csv and data_2.csv, and _SUCCESS file will be ignored. Default is to ignore _SUCCESS files which are commonly created in exporting data from Hadoop " + }, + { + "prompt_type": "plain", + "instruction": ": What does data_import_upcast_multi_file do? : data import upcast multi file config.toml: For data import from a directory (multiple files), allow column types to differ and perform upcast during import." + }, + { + "prompt_type": "plain", + "instruction": ": Explain data_import_upcast_multi_file. : data import upcast multi file config.toml: For data import from a directory (multiple files), allow column types to differ and perform upcast during import." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_import_upcast_multi_file", + "output": "data import upcast multi file config.toml: For data import from a directory (multiple files), allow column types to differ and perform upcast during import." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_import_upcast_multi_file", + "output": "data import upcast multi file config.toml: For data import from a directory (multiple files), allow column types to differ and perform upcast during import." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data import upcast multi file", + "output": "data import upcast multi file config.toml: For data import from a directory (multiple files), allow column types to differ and perform upcast during import." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "data import upcast multi file config.toml: For data import from a directory (multiple files), allow column types to differ and perform upcast during import." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting data_import_upcast_multi_file", + "output": "data import upcast multi file config.toml: For data import from a directory (multiple files), allow column types to differ and perform upcast during import." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting data_import_upcast_multi_file", + "output": "data import upcast multi file config.toml: For data import from a directory (multiple files), allow column types to differ and perform upcast during import." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does data_import_explode_list_type_columns_in_parquet do? : data import explode list type columns in parquet config.toml: If set to true, will explode columns with list data type when importing parquet files." + }, + { + "prompt_type": "plain", + "instruction": ": Explain data_import_explode_list_type_columns_in_parquet. : data import explode list type columns in parquet config.toml: If set to true, will explode columns with list data type when importing parquet files." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_import_explode_list_type_columns_in_parquet", + "output": "data import explode list type columns in parquet config.toml: If set to true, will explode columns with list data type when importing parquet files." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_import_explode_list_type_columns_in_parquet", + "output": "data import explode list type columns in parquet config.toml: If set to true, will explode columns with list data type when importing parquet files." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data import explode list type columns in parquet", + "output": "data import explode list type columns in parquet config.toml: If set to true, will explode columns with list data type when importing parquet files." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "data import explode list type columns in parquet config.toml: If set to true, will explode columns with list data type when importing parquet files." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting data_import_explode_list_type_columns_in_parquet", + "output": "data import explode list type columns in parquet config.toml: If set to true, will explode columns with list data type when importing parquet files." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting data_import_explode_list_type_columns_in_parquet", + "output": "data import explode list type columns in parquet config.toml: If set to true, will explode columns with list data type when importing parquet files." + }, + { + "prompt_type": "plain", + "instruction": ": What does files_without_extensions_expected_types do? : files without extensions expected types config.toml: List of file types that Driverless AI should attempt to import data as IF no file extension exists in the file name If no file extension is provided, Driverless AI will attempt to import the data starting with first type in the defined list. Default [\"parquet\", \"orc\"] Example: 'test.csv' (file extension exists) vs 'test' (file extension DOES NOT exist) NOTE: see supported_file_types configuration option for more details on supported file types " + }, + { + "prompt_type": "plain", + "instruction": ": Explain files_without_extensions_expected_types. : files without extensions expected types config.toml: List of file types that Driverless AI should attempt to import data as IF no file extension exists in the file name If no file extension is provided, Driverless AI will attempt to import the data starting with first type in the defined list. 
Default [\"parquet\", \"orc\"] Example: 'test.csv' (file extension exists) vs 'test' (file extension DOES NOT exist) NOTE: see supported_file_types configuration option for more details on supported file types " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "files_without_extensions_expected_types", + "output": "files without extensions expected types config.toml: List of file types that Driverless AI should attempt to import data as IF no file extension exists in the file name If no file extension is provided, Driverless AI will attempt to import the data starting with first type in the defined list. Default [\"parquet\", \"orc\"] Example: 'test.csv' (file extension exists) vs 'test' (file extension DOES NOT exist) NOTE: see supported_file_types configuration option for more details on supported file types " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "files_without_extensions_expected_types", + "output": "files without extensions expected types config.toml: List of file types that Driverless AI should attempt to import data as IF no file extension exists in the file name If no file extension is provided, Driverless AI will attempt to import the data starting with first type in the defined list. 
Default [\"parquet\", \"orc\"] Example: 'test.csv' (file extension exists) vs 'test' (file extension DOES NOT exist) NOTE: see supported_file_types configuration option for more details on supported file types " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "files without extensions expected types", + "output": "files without extensions expected types config.toml: List of file types that Driverless AI should attempt to import data as IF no file extension exists in the file name If no file extension is provided, Driverless AI will attempt to import the data starting with first type in the defined list. Default [\"parquet\", \"orc\"] Example: 'test.csv' (file extension exists) vs 'test' (file extension DOES NOT exist) NOTE: see supported_file_types configuration option for more details on supported file types " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "files without extensions expected types config.toml: List of file types that Driverless AI should attempt to import data as IF no file extension exists in the file name If no file extension is provided, Driverless AI will attempt to import the data starting with first type in the defined list. 
Default [\"parquet\", \"orc\"] Example: 'test.csv' (file extension exists) vs 'test' (file extension DOES NOT exist) NOTE: see supported_file_types configuration option for more details on supported file types " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting files_without_extensions_expected_types", + "output": "files without extensions expected types config.toml: List of file types that Driverless AI should attempt to import data as IF no file extension exists in the file name If no file extension is provided, Driverless AI will attempt to import the data starting with first type in the defined list. Default [\"parquet\", \"orc\"] Example: 'test.csv' (file extension exists) vs 'test' (file extension DOES NOT exist) NOTE: see supported_file_types configuration option for more details on supported file types " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting files_without_extensions_expected_types", + "output": "files without extensions expected types config.toml: List of file types that Driverless AI should attempt to import data as IF no file extension exists in the file name If no file extension is provided, Driverless AI will attempt to import the data starting with first type in the defined list. Default [\"parquet\", \"orc\"] Example: 'test.csv' (file extension exists) vs 'test' (file extension DOES NOT exist) NOTE: see supported_file_types configuration option for more details on supported file types " + }, + { + "prompt_type": "plain", + "instruction": ": What does do_not_log_list do? : do not log list config.toml: do_not_log_list : add configurations that you do not wish to be recorded in logs here.They will still be stored in experiment information so child experiments can behave consistently." + }, + { + "prompt_type": "plain", + "instruction": ": Explain do_not_log_list. 
: do not log list config.toml: do_not_log_list : add configurations that you do not wish to be recorded in logs here.They will still be stored in experiment information so child experiments can behave consistently." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "do_not_log_list", + "output": "do not log list config.toml: do_not_log_list : add configurations that you do not wish to be recorded in logs here.They will still be stored in experiment information so child experiments can behave consistently." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "do_not_log_list", + "output": "do not log list config.toml: do_not_log_list : add configurations that you do not wish to be recorded in logs here.They will still be stored in experiment information so child experiments can behave consistently." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "do not log list", + "output": "do not log list config.toml: do_not_log_list : add configurations that you do not wish to be recorded in logs here.They will still be stored in experiment information so child experiments can behave consistently." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "do not log list config.toml: do_not_log_list : add configurations that you do not wish to be recorded in logs here.They will still be stored in experiment information so child experiments can behave consistently." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting do_not_log_list", + "output": "do not log list config.toml: do_not_log_list : add configurations that you do not wish to be recorded in logs here.They will still be stored in experiment information so child experiments can behave consistently." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting do_not_log_list", + "output": "do not log list config.toml: do_not_log_list : add configurations that you do not wish to be recorded in logs here.They will still be stored in experiment information so child experiments can behave consistently." + }, + { + "prompt_type": "plain", + "instruction": ": What does do_not_store_list do? : do not store list config.toml: do_not_store_list : add configurations that you do not wish to be stored at all here.Will not be remembered across experiments, so not applicable to data science related itemsthat could be controlled by a user. These items are automatically not logged." + }, + { + "prompt_type": "plain", + "instruction": ": Explain do_not_store_list. : do not store list config.toml: do_not_store_list : add configurations that you do not wish to be stored at all here.Will not be remembered across experiments, so not applicable to data science related itemsthat could be controlled by a user. These items are automatically not logged." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "do_not_store_list", + "output": "do not store list config.toml: do_not_store_list : add configurations that you do not wish to be stored at all here.Will not be remembered across experiments, so not applicable to data science related itemsthat could be controlled by a user. These items are automatically not logged." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "do_not_store_list", + "output": "do not store list config.toml: do_not_store_list : add configurations that you do not wish to be stored at all here.Will not be remembered across experiments, so not applicable to data science related itemsthat could be controlled by a user. These items are automatically not logged." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "do not store list", + "output": "do not store list config.toml: do_not_store_list : add configurations that you do not wish to be stored at all here.Will not be remembered across experiments, so not applicable to data science related itemsthat could be controlled by a user. These items are automatically not logged." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "do not store list config.toml: do_not_store_list : add configurations that you do not wish to be stored at all here.Will not be remembered across experiments, so not applicable to data science related itemsthat could be controlled by a user. These items are automatically not logged." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting do_not_store_list", + "output": "do not store list config.toml: do_not_store_list : add configurations that you do not wish to be stored at all here.Will not be remembered across experiments, so not applicable to data science related itemsthat could be controlled by a user. These items are automatically not logged." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting do_not_store_list", + "output": "do not store list config.toml: do_not_store_list : add configurations that you do not wish to be stored at all here.Will not be remembered across experiments, so not applicable to data science related itemsthat could be controlled by a user. These items are automatically not logged." + }, + { + "prompt_type": "plain", + "instruction": ": What does datatable_parse_max_memory_bytes do? : datatable parse max memory bytes config.toml: Memory limit in bytes for datatable to use during parsing of CSV files. -1 for unlimited. 0 for automatic. >0 for constraint." + }, + { + "prompt_type": "plain", + "instruction": ": Explain datatable_parse_max_memory_bytes. : datatable parse max memory bytes config.toml: Memory limit in bytes for datatable to use during parsing of CSV files. -1 for unlimited. 0 for automatic. >0 for constraint." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datatable_parse_max_memory_bytes", + "output": "datatable parse max memory bytes config.toml: Memory limit in bytes for datatable to use during parsing of CSV files. -1 for unlimited. 0 for automatic. >0 for constraint." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datatable_parse_max_memory_bytes", + "output": "datatable parse max memory bytes config.toml: Memory limit in bytes for datatable to use during parsing of CSV files. -1 for unlimited. 0 for automatic. >0 for constraint." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datatable parse max memory bytes", + "output": "datatable parse max memory bytes config.toml: Memory limit in bytes for datatable to use during parsing of CSV files. -1 for unlimited. 0 for automatic. 
>0 for constraint." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "datatable parse max memory bytes config.toml: Memory limit in bytes for datatable to use during parsing of CSV files. -1 for unlimited. 0 for automatic. >0 for constraint." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting datatable_parse_max_memory_bytes", + "output": "datatable parse max memory bytes config.toml: Memory limit in bytes for datatable to use during parsing of CSV files. -1 for unlimited. 0 for automatic. >0 for constraint." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting datatable_parse_max_memory_bytes", + "output": "datatable parse max memory bytes config.toml: Memory limit in bytes for datatable to use during parsing of CSV files. -1 for unlimited. 0 for automatic. >0 for constraint." + }, + { + "prompt_type": "plain", + "instruction": ": What does datatable_separator do? : datatable separator config.toml: Delimiter/Separator to use when parsing tabular text files like CSV. Automatic if empty. Must be provided at system start." + }, + { + "prompt_type": "plain", + "instruction": ": Explain datatable_separator. : datatable separator config.toml: Delimiter/Separator to use when parsing tabular text files like CSV. Automatic if empty. Must be provided at system start." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datatable_separator", + "output": "datatable separator config.toml: Delimiter/Separator to use when parsing tabular text files like CSV. Automatic if empty. Must be provided at system start." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datatable_separator", + "output": "datatable separator config.toml: Delimiter/Separator to use when parsing tabular text files like CSV. Automatic if empty. Must be provided at system start." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datatable separator", + "output": "datatable separator config.toml: Delimiter/Separator to use when parsing tabular text files like CSV. Automatic if empty. Must be provided at system start." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "datatable separator config.toml: Delimiter/Separator to use when parsing tabular text files like CSV. Automatic if empty. Must be provided at system start." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting datatable_separator", + "output": "datatable separator config.toml: Delimiter/Separator to use when parsing tabular text files like CSV. Automatic if empty. Must be provided at system start." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting datatable_separator", + "output": "datatable separator config.toml: Delimiter/Separator to use when parsing tabular text files like CSV. Automatic if empty. Must be provided at system start." + }, + { + "prompt_type": "plain", + "instruction": ": What does ping_load_data_file do? : ping load data file config.toml: Whether to enable ping of system status during DAI data ingestion." + }, + { + "prompt_type": "plain", + "instruction": ": Explain ping_load_data_file. : ping load data file config.toml: Whether to enable ping of system status during DAI data ingestion." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to enable ping of system status during DAI data ingestion.: . : Set the ping load data file config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ping_load_data_file", + "output": "ping load data file config.toml: Whether to enable ping of system status during DAI data ingestion." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ping_load_data_file", + "output": "ping load data file config.toml: Whether to enable ping of system status during DAI data ingestion.: Whether to enable ping of system status during DAI data ingestion." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ping load data file", + "output": "ping load data file config.toml: Whether to enable ping of system status during DAI data ingestion.: Whether to enable ping of system status during DAI data ingestion." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to enable ping of system status during DAI data ingestion.: ", + "output": "ping load data file config.toml: Whether to enable ping of system status during DAI data ingestion.: Whether to enable ping of system status during DAI data ingestion." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ping_load_data_file", + "output": "ping load data file config.toml: Whether to enable ping of system status during DAI data ingestion." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ping_load_data_file", + "output": "ping load data file config.toml: Whether to enable ping of system status during DAI data ingestion.: Whether to enable ping of system status during DAI data ingestion." + }, + { + "prompt_type": "plain", + "instruction": ": What does ping_sleep_period do? : ping sleep period config.toml: Period between checking DAI status. Should be small enough to avoid slowing parent who stops ping process." + }, + { + "prompt_type": "plain", + "instruction": ": Explain ping_sleep_period. : ping sleep period config.toml: Period between checking DAI status. Should be small enough to avoid slowing parent who stops ping process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ping_sleep_period", + "output": "ping sleep period config.toml: Period between checking DAI status. Should be small enough to avoid slowing parent who stops ping process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ping_sleep_period", + "output": "ping sleep period config.toml: Period between checking DAI status. Should be small enough to avoid slowing parent who stops ping process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ping sleep period", + "output": "ping sleep period config.toml: Period between checking DAI status. Should be small enough to avoid slowing parent who stops ping process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ping sleep period config.toml: Period between checking DAI status. Should be small enough to avoid slowing parent who stops ping process." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ping_sleep_period", + "output": "ping sleep period config.toml: Period between checking DAI status. Should be small enough to avoid slowing parent who stops ping process." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ping_sleep_period", + "output": "ping sleep period config.toml: Period between checking DAI status. Should be small enough to avoid slowing parent who stops ping process." + }, + { + "prompt_type": "plain", + "instruction": ": What does data_precision do? : data precision config.toml: Precision of how data is stored 'datatable' keeps original datatable storage types (i.e. bool, int, float32, float64) (experimental) 'float32' best for speed, 'float64' best for accuracy or very large input values, \"datatable\" best for memory 'float32' allows numbers up to about +-3E38 with relative error of about 1E-7 'float64' allows numbers up to about +-1E308 with relative error of about 1E-16 Some calculations, like the GLM standardization, can only handle up to sqrt() of these maximums for data values, So GLM with 32-bit precision can only handle up to about a value of 1E19 before standardization generates inf values. If you see \"Best individual has invalid score\" you may require higher precision." + }, + { + "prompt_type": "plain", + "instruction": ": Explain data_precision. : data precision config.toml: Precision of how data is stored 'datatable' keeps original datatable storage types (i.e. 
bool, int, float32, float64) (experimental) 'float32' best for speed, 'float64' best for accuracy or very large input values, \"datatable\" best for memory 'float32' allows numbers up to about +-3E38 with relative error of about 1E-7 'float64' allows numbers up to about +-1E308 with relative error of about 1E-16 Some calculations, like the GLM standardization, can only handle up to sqrt() of these maximums for data values, So GLM with 32-bit precision can only handle up to about a value of 1E19 before standardization generates inf values. If you see \"Best individual has invalid score\" you may require higher precision." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_precision", + "output": "data precision config.toml: Precision of how data is stored 'datatable' keeps original datatable storage types (i.e. bool, int, float32, float64) (experimental) 'float32' best for speed, 'float64' best for accuracy or very large input values, \"datatable\" best for memory 'float32' allows numbers up to about +-3E38 with relative error of about 1E-7 'float64' allows numbers up to about +-1E308 with relative error of about 1E-16 Some calculations, like the GLM standardization, can only handle up to sqrt() of these maximums for data values, So GLM with 32-bit precision can only handle up to about a value of 1E19 before standardization generates inf values. If you see \"Best individual has invalid score\" you may require higher precision." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_precision", + "output": "data precision config.toml: Precision of how data is stored 'datatable' keeps original datatable storage types (i.e. 
bool, int, float32, float64) (experimental) 'float32' best for speed, 'float64' best for accuracy or very large input values, \"datatable\" best for memory 'float32' allows numbers up to about +-3E38 with relative error of about 1E-7 'float64' allows numbers up to about +-1E308 with relative error of about 1E-16 Some calculations, like the GLM standardization, can only handle up to sqrt() of these maximums for data values, So GLM with 32-bit precision can only handle up to about a value of 1E19 before standardization generates inf values. If you see \"Best individual has invalid score\" you may require higher precision." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data precision", + "output": "data precision config.toml: Precision of how data is stored 'datatable' keeps original datatable storage types (i.e. bool, int, float32, float64) (experimental) 'float32' best for speed, 'float64' best for accuracy or very large input values, \"datatable\" best for memory 'float32' allows numbers up to about +-3E38 with relative error of about 1E-7 'float64' allows numbers up to about +-1E308 with relative error of about 1E-16 Some calculations, like the GLM standardization, can only handle up to sqrt() of these maximums for data values, So GLM with 32-bit precision can only handle up to about a value of 1E19 before standardization generates inf values. If you see \"Best individual has invalid score\" you may require higher precision." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "data precision config.toml: Precision of how data is stored 'datatable' keeps original datatable storage types (i.e. 
bool, int, float32, float64) (experimental) 'float32' best for speed, 'float64' best for accuracy or very large input values, \"datatable\" best for memory 'float32' allows numbers up to about +-3E38 with relative error of about 1E-7 'float64' allows numbers up to about +-1E308 with relative error of about 1E-16 Some calculations, like the GLM standardization, can only handle up to sqrt() of these maximums for data values, So GLM with 32-bit precision can only handle up to about a value of 1E19 before standardization generates inf values. If you see \"Best individual has invalid score\" you may require higher precision." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting data_precision", + "output": "data precision config.toml: Precision of how data is stored 'datatable' keeps original datatable storage types (i.e. bool, int, float32, float64) (experimental) 'float32' best for speed, 'float64' best for accuracy or very large input values, \"datatable\" best for memory 'float32' allows numbers up to about +-3E38 with relative error of about 1E-7 'float64' allows numbers up to about +-1E308 with relative error of about 1E-16 Some calculations, like the GLM standardization, can only handle up to sqrt() of these maximums for data values, So GLM with 32-bit precision can only handle up to about a value of 1E19 before standardization generates inf values. If you see \"Best individual has invalid score\" you may require higher precision." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting data_precision", + "output": "data precision config.toml: Precision of how data is stored 'datatable' keeps original datatable storage types (i.e. 
bool, int, float32, float64) (experimental) 'float32' best for speed, 'float64' best for accuracy or very large input values, \"datatable\" best for memory 'float32' allows numbers up to about +-3E38 with relative error of about 1E-7 'float64' allows numbers up to about +-1E308 with relative error of about 1E-16 Some calculations, like the GLM standardization, can only handle up to sqrt() of these maximums for data values, So GLM with 32-bit precision can only handle up to about a value of 1E19 before standardization generates inf values. If you see \"Best individual has invalid score\" you may require higher precision." + }, + { + "prompt_type": "plain", + "instruction": ": What does transformer_precision do? : transformer precision config.toml: Precision of most data transformers (same options and notes as data_precision). Useful for higher precision in transformers with numerous operations that can accumulate error. Also useful if want faster performance for transformers but otherwise want data stored in high precision." + }, + { + "prompt_type": "plain", + "instruction": ": Explain transformer_precision. : transformer precision config.toml: Precision of most data transformers (same options and notes as data_precision). Useful for higher precision in transformers with numerous operations that can accumulate error. Also useful if want faster performance for transformers but otherwise want data stored in high precision." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "transformer_precision", + "output": "transformer precision config.toml: Precision of most data transformers (same options and notes as data_precision). Useful for higher precision in transformers with numerous operations that can accumulate error. Also useful if want faster performance for transformers but otherwise want data stored in high precision." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "transformer_precision", + "output": "transformer precision config.toml: Precision of most data transformers (same options and notes as data_precision). Useful for higher precision in transformers with numerous operations that can accumulate error. Also useful if want faster performance for transformers but otherwise want data stored in high precision." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "transformer precision", + "output": "transformer precision config.toml: Precision of most data transformers (same options and notes as data_precision). Useful for higher precision in transformers with numerous operations that can accumulate error. Also useful if want faster performance for transformers but otherwise want data stored in high precision." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "transformer precision config.toml: Precision of most data transformers (same options and notes as data_precision). Useful for higher precision in transformers with numerous operations that can accumulate error. Also useful if want faster performance for transformers but otherwise want data stored in high precision." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting transformer_precision", + "output": "transformer precision config.toml: Precision of most data transformers (same options and notes as data_precision). Useful for higher precision in transformers with numerous operations that can accumulate error. Also useful if want faster performance for transformers but otherwise want data stored in high precision." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting transformer_precision", + "output": "transformer precision config.toml: Precision of most data transformers (same options and notes as data_precision). Useful for higher precision in transformers with numerous operations that can accumulate error. Also useful if want faster performance for transformers but otherwise want data stored in high precision." + }, + { + "prompt_type": "plain", + "instruction": ": What does ulimit_up_to_hard_limit do? : ulimit up to hard limit config.toml: Whether to change ulimit soft limits up to hard limits (for DAI server app, which is not a generic user app). Prevents resource limit problems in some cases. Restricted to no more than limit_nofile and limit_nproc for those resources." + }, + { + "prompt_type": "plain", + "instruction": ": Explain ulimit_up_to_hard_limit. : ulimit up to hard limit config.toml: Whether to change ulimit soft limits up to hard limits (for DAI server app, which is not a generic user app). Prevents resource limit problems in some cases. Restricted to no more than limit_nofile and limit_nproc for those resources." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ulimit_up_to_hard_limit", + "output": "ulimit up to hard limit config.toml: Whether to change ulimit soft limits up to hard limits (for DAI server app, which is not a generic user app). Prevents resource limit problems in some cases. Restricted to no more than limit_nofile and limit_nproc for those resources." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ulimit_up_to_hard_limit", + "output": "ulimit up to hard limit config.toml: Whether to change ulimit soft limits up to hard limits (for DAI server app, which is not a generic user app). Prevents resource limit problems in some cases. 
Restricted to no more than limit_nofile and limit_nproc for those resources." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ulimit up to hard limit", + "output": "ulimit up to hard limit config.toml: Whether to change ulimit soft limits up to hard limits (for DAI server app, which is not a generic user app). Prevents resource limit problems in some cases. Restricted to no more than limit_nofile and limit_nproc for those resources." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ulimit up to hard limit config.toml: Whether to change ulimit soft limits up to hard limits (for DAI server app, which is not a generic user app). Prevents resource limit problems in some cases. Restricted to no more than limit_nofile and limit_nproc for those resources." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ulimit_up_to_hard_limit", + "output": "ulimit up to hard limit config.toml: Whether to change ulimit soft limits up to hard limits (for DAI server app, which is not a generic user app). Prevents resource limit problems in some cases. Restricted to no more than limit_nofile and limit_nproc for those resources." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ulimit_up_to_hard_limit", + "output": "ulimit up to hard limit config.toml: Whether to change ulimit soft limits up to hard limits (for DAI server app, which is not a generic user app). Prevents resource limit problems in some cases. Restricted to no more than limit_nofile and limit_nproc for those resources." + }, + { + "prompt_type": "plain", + "instruction": ": What does disable_core_files do? : disable core files config.toml: Whether to disable core files if debug_log=true. 
If debug_log=false, core file creation is always disabled.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain disable_core_files. : disable core files config.toml: Whether to disable core files if debug_log=true. If debug_log=false, core file creation is always disabled.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "disable_core_files", + "output": "disable core files config.toml: Whether to disable core files if debug_log=true. If debug_log=false, core file creation is always disabled.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "disable_core_files", + "output": "disable core files config.toml: Whether to disable core files if debug_log=true. If debug_log=false, core file creation is always disabled.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "disable core files", + "output": "disable core files config.toml: Whether to disable core files if debug_log=true. If debug_log=false, core file creation is always disabled.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to disable core files if debug_log=true. If debug_log=false, core file creation is always disabled.: ", + "output": "disable core files config.toml: Whether to disable core files if debug_log=true. If debug_log=false, core file creation is always disabled.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting disable_core_files", + "output": "disable core files config.toml: Whether to disable core files if debug_log=true. 
If debug_log=false, core file creation is always disabled.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting disable_core_files", + "output": "disable core files config.toml: Whether to disable core files if debug_log=true. If debug_log=false, core file creation is always disabled.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does limit_nofile do? : limit nofile config.toml: number of file limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "plain", + "instruction": ": Explain limit_nofile. : limit nofile config.toml: number of file limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "limit_nofile", + "output": "limit nofile config.toml: number of file limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "limit_nofile", + "output": "limit nofile config.toml: number of file limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "limit nofile", + "output": "limit nofile config.toml: number of file limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "limit nofile config.toml: number of file limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting limit_nofile", + "output": "limit nofile config.toml: number of file limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the 
expert setting limit_nofile", + "output": "limit nofile config.toml: number of file limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "plain", + "instruction": ": What does limit_nproc do? : limit nproc config.toml: number of threads limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "plain", + "instruction": ": Explain limit_nproc. : limit nproc config.toml: number of threads limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "limit_nproc", + "output": "limit nproc config.toml: number of threads limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "limit_nproc", + "output": "limit nproc config.toml: number of threads limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "limit nproc", + "output": "limit nproc config.toml: number of threads limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "limit nproc config.toml: number of threads limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting limit_nproc", + "output": "limit nproc config.toml: number of threads limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting limit_nproc", + "output": "limit nproc config.toml: number of threads limit Below should be consistent with start-dai.sh" + }, + { + "prompt_type": "plain", + "instruction": ": What does 
compute_correlation do? : compute correlation config.toml: ' Whether to compute training, validation, and test correlation matrix (table and heatmap pdf) and save to disk alpha: WARNING: currently single threaded and quadratically slow for many columns" + }, + { + "prompt_type": "plain", + "instruction": ": Explain compute_correlation. : compute correlation config.toml: ' Whether to compute training, validation, and test correlation matrix (table and heatmap pdf) and save to disk alpha: WARNING: currently single threaded and quadratically slow for many columns" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Compute correlation matrix: . : Set the compute correlation config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "compute_correlation", + "output": "compute correlation config.toml: ' Whether to compute training, validation, and test correlation matrix (table and heatmap pdf) and save to disk alpha: WARNING: currently single threaded and quadratically slow for many columns" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "compute_correlation", + "output": "compute correlation config.toml: Compute correlation matrix: ' Whether to compute training, validation, and test correlation matrix (table and heatmap pdf) and save to disk alpha: WARNING: currently single threaded and quadratically slow for many columns" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "compute correlation", + "output": "compute correlation config.toml: Compute correlation matrix: ' Whether to compute training, validation, and test correlation matrix (table and heatmap pdf) and save to disk alpha: WARNING: currently single threaded and quadratically slow for many columns" + }, + { + "prompt_type": "human_bot", + "instruction": 
"Explain the following expert setting for Driverless AI", + "input": "Compute correlation matrix: ", + "output": "compute correlation config.toml: Compute correlation matrix: ' Whether to compute training, validation, and test correlation matrix (table and heatmap pdf) and save to disk alpha: WARNING: currently single threaded and quadratically slow for many columns" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting compute_correlation", + "output": "compute correlation config.toml: ' Whether to compute training, validation, and test correlation matrix (table and heatmap pdf) and save to disk alpha: WARNING: currently single threaded and quadratically slow for many columns" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting compute_correlation", + "output": "compute correlation config.toml: Compute correlation matrix: ' Whether to compute training, validation, and test correlation matrix (table and heatmap pdf) and save to disk alpha: WARNING: currently single threaded and quadratically slow for many columns" + }, + { + "prompt_type": "plain", + "instruction": ": What does produce_correlation_heatmap do? : produce correlation heatmap config.toml: Whether to dump to disk a correlation heatmap" + }, + { + "prompt_type": "plain", + "instruction": ": Explain produce_correlation_heatmap. 
: produce correlation heatmap config.toml: Whether to dump to disk a correlation heatmap" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "produce_correlation_heatmap", + "output": "produce correlation heatmap config.toml: Whether to dump to disk a correlation heatmap" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "produce_correlation_heatmap", + "output": "produce correlation heatmap config.toml: Whether to dump to disk a correlation heatmap" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "produce correlation heatmap", + "output": "produce correlation heatmap config.toml: Whether to dump to disk a correlation heatmap" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "produce correlation heatmap config.toml: Whether to dump to disk a correlation heatmap" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting produce_correlation_heatmap", + "output": "produce correlation heatmap config.toml: Whether to dump to disk a correlation heatmap" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting produce_correlation_heatmap", + "output": "produce correlation heatmap config.toml: Whether to dump to disk a correlation heatmap" + }, + { + "prompt_type": "plain", + "instruction": ": What does high_correlation_value_to_report do? : high correlation value to report config.toml: Value to report high correlation between original features" + }, + { + "prompt_type": "plain", + "instruction": ": Explain high_correlation_value_to_report. 
: high correlation value to report config.toml: Value to report high correlation between original features" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Threshold for reporting high correlation: . : Set the high correlation value to report config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "high_correlation_value_to_report", + "output": "high correlation value to report config.toml: Value to report high correlation between original features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "high_correlation_value_to_report", + "output": "high correlation value to report config.toml: Threshold for reporting high correlation: Value to report high correlation between original features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "high correlation value to report", + "output": "high correlation value to report config.toml: Threshold for reporting high correlation: Value to report high correlation between original features" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Threshold for reporting high correlation: ", + "output": "high correlation value to report config.toml: Threshold for reporting high correlation: Value to report high correlation between original features" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting high_correlation_value_to_report", + "output": "high correlation value to report config.toml: Value to report high correlation between original features" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting high_correlation_value_to_report", + "output": "high correlation value to report 
config.toml: Threshold for reporting high correlation: Value to report high correlation between original features" + }, + { + "prompt_type": "plain", + "instruction": ": What does restart_experiments_after_shutdown do? : restart experiments after shutdown config.toml: If True, experiments aborted by server restart will automatically restart and continue upon user login" + }, + { + "prompt_type": "plain", + "instruction": ": Explain restart_experiments_after_shutdown. : restart experiments after shutdown config.toml: If True, experiments aborted by server restart will automatically restart and continue upon user login" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "restart_experiments_after_shutdown", + "output": "restart experiments after shutdown config.toml: If True, experiments aborted by server restart will automatically restart and continue upon user login" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "restart_experiments_after_shutdown", + "output": "restart experiments after shutdown config.toml: If True, experiments aborted by server restart will automatically restart and continue upon user login" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "restart experiments after shutdown", + "output": "restart experiments after shutdown config.toml: If True, experiments aborted by server restart will automatically restart and continue upon user login" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "restart experiments after shutdown config.toml: If True, experiments aborted by server restart will automatically restart and continue upon user login" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the 
expert setting restart_experiments_after_shutdown", + "output": "restart experiments after shutdown config.toml: If True, experiments aborted by server restart will automatically restart and continue upon user login" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting restart_experiments_after_shutdown", + "output": "restart experiments after shutdown config.toml: If True, experiments aborted by server restart will automatically restart and continue upon user login" + }, + { + "prompt_type": "plain", + "instruction": ": What does any_env_overrides do? : any env overrides config.toml: When environment variable is set to toml value, consider that an override of any toml value. Experiment's remember toml values for scoring, and this treats any environment set as equivalent to putting OVERRIDE_ in front of the environment key." + }, + { + "prompt_type": "plain", + "instruction": ": Explain any_env_overrides. : any env overrides config.toml: When environment variable is set to toml value, consider that an override of any toml value. Experiment's remember toml values for scoring, and this treats any environment set as equivalent to putting OVERRIDE_ in front of the environment key." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "any_env_overrides", + "output": "any env overrides config.toml: When environment variable is set to toml value, consider that an override of any toml value. Experiment's remember toml values for scoring, and this treats any environment set as equivalent to putting OVERRIDE_ in front of the environment key." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "any_env_overrides", + "output": "any env overrides config.toml: When environment variable is set to toml value, consider that an override of any toml value. 
Experiment's remember toml values for scoring, and this treats any environment set as equivalent to putting OVERRIDE_ in front of the environment key." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "any env overrides", + "output": "any env overrides config.toml: When environment variable is set to toml value, consider that an override of any toml value. Experiment's remember toml values for scoring, and this treats any environment set as equivalent to putting OVERRIDE_ in front of the environment key." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "any env overrides config.toml: When environment variable is set to toml value, consider that an override of any toml value. Experiment's remember toml values for scoring, and this treats any environment set as equivalent to putting OVERRIDE_ in front of the environment key." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting any_env_overrides", + "output": "any env overrides config.toml: When environment variable is set to toml value, consider that an override of any toml value. Experiment's remember toml values for scoring, and this treats any environment set as equivalent to putting OVERRIDE_ in front of the environment key." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting any_env_overrides", + "output": "any env overrides config.toml: When environment variable is set to toml value, consider that an override of any toml value. Experiment's remember toml values for scoring, and this treats any environment set as equivalent to putting OVERRIDE_ in front of the environment key." + }, + { + "prompt_type": "plain", + "instruction": ": What does datatable_bom_csv do? 
: datatable bom csv config.toml: Include byte order mark (BOM) when writing CSV files. Required to support UTF-8 encoding in Excel." + }, + { + "prompt_type": "plain", + "instruction": ": Explain datatable_bom_csv. : datatable bom csv config.toml: Include byte order mark (BOM) when writing CSV files. Required to support UTF-8 encoding in Excel." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datatable_bom_csv", + "output": "datatable bom csv config.toml: Include byte order mark (BOM) when writing CSV files. Required to support UTF-8 encoding in Excel." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datatable_bom_csv", + "output": "datatable bom csv config.toml: Include byte order mark (BOM) when writing CSV files. Required to support UTF-8 encoding in Excel." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datatable bom csv", + "output": "datatable bom csv config.toml: Include byte order mark (BOM) when writing CSV files. Required to support UTF-8 encoding in Excel." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "datatable bom csv config.toml: Include byte order mark (BOM) when writing CSV files. Required to support UTF-8 encoding in Excel." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting datatable_bom_csv", + "output": "datatable bom csv config.toml: Include byte order mark (BOM) when writing CSV files. Required to support UTF-8 encoding in Excel." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting datatable_bom_csv", + "output": "datatable bom csv config.toml: Include byte order mark (BOM) when writing CSV files. 
Required to support UTF-8 encoding in Excel." + }, + { + "prompt_type": "plain", + "instruction": ": What does debug_print do? : debug print config.toml: Whether to enable debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files." + }, + { + "prompt_type": "plain", + "instruction": ": Explain debug_print. : debug print config.toml: Whether to enable debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable debug prints to console: . : Set the debug print config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "debug_print", + "output": "debug print config.toml: Whether to enable debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "debug_print", + "output": "debug print config.toml: Enable debug prints to console: Whether to enable debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "debug print", + "output": "debug print config.toml: Enable debug prints to console: Whether to enable debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable debug prints to console: ", + "output": "debug print config.toml: Enable debug prints to console: Whether to enable debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting debug_print", + "output": "debug print config.toml: Whether to enable debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting debug_print", + "output": "debug print config.toml: Enable debug prints to console: Whether to enable debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files." + }, + { + "prompt_type": "plain", + "instruction": ": What does debug_print_level do? : debug print level config.toml: Level (0-4) for debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files. 1-2 is normal, 4 would lead to highly excessive debug and is not recommended in production." + }, + { + "prompt_type": "plain", + "instruction": ": Explain debug_print_level. : debug print level config.toml: Level (0-4) for debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files. 1-2 is normal, 4 would lead to highly excessive debug and is not recommended in production." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Level of debug to print: . : Set the debug print level config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "debug_print_level", + "output": "debug print level config.toml: Level (0-4) for debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files. 1-2 is normal, 4 would lead to highly excessive debug and is not recommended in production." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "debug_print_level", + "output": "debug print level config.toml: Level of debug to print: Level (0-4) for debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files. 1-2 is normal, 4 would lead to highly excessive debug and is not recommended in production." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "debug print level", + "output": "debug print level config.toml: Level of debug to print: Level (0-4) for debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files. 1-2 is normal, 4 would lead to highly excessive debug and is not recommended in production." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Level of debug to print: ", + "output": "debug print level config.toml: Level of debug to print: Level (0-4) for debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files. 1-2 is normal, 4 would lead to highly excessive debug and is not recommended in production." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting debug_print_level", + "output": "debug print level config.toml: Level (0-4) for debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files. 1-2 is normal, 4 would lead to highly excessive debug and is not recommended in production." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting debug_print_level", + "output": "debug print level config.toml: Level of debug to print: Level (0-4) for debug prints (to console/stdout/stderr), e.g. showing up in dai*.log or dai*.txt type files. 
1-2 is normal, 4 would lead to highly excessive debug and is not recommended in production." + }, + { + "prompt_type": "plain", + "instruction": ": What does check_invalid_config_toml_keys do? : check invalid config toml keys config.toml: Whether to check if config.toml keys are valid and fail if not valid" + }, + { + "prompt_type": "plain", + "instruction": ": Explain check_invalid_config_toml_keys. : check invalid config toml keys config.toml: Whether to check if config.toml keys are valid and fail if not valid" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_invalid_config_toml_keys", + "output": "check invalid config toml keys config.toml: Whether to check if config.toml keys are valid and fail if not valid" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_invalid_config_toml_keys", + "output": "check invalid config toml keys config.toml: Whether to check if config.toml keys are valid and fail if not valid" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check invalid config toml keys", + "output": "check invalid config toml keys config.toml: Whether to check if config.toml keys are valid and fail if not valid" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "check invalid config toml keys config.toml: Whether to check if config.toml keys are valid and fail if not valid" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting check_invalid_config_toml_keys", + "output": "check invalid config toml keys config.toml: Whether to check if config.toml keys are valid and fail if not valid" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the 
expert setting check_invalid_config_toml_keys", + "output": "check invalid config toml keys config.toml: Whether to check if config.toml keys are valid and fail if not valid" + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_no_pid_host do? : allow no pid host config.toml: Whether to allow no --pid=host setting. Some GPU info from within docker will not be correct.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_no_pid_host. : allow no pid host config.toml: Whether to allow no --pid=host setting. Some GPU info from within docker will not be correct.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_no_pid_host", + "output": "allow no pid host config.toml: Whether to allow no --pid=host setting. Some GPU info from within docker will not be correct.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_no_pid_host", + "output": "allow no pid host config.toml: Whether to allow no --pid=host setting. Some GPU info from within docker will not be correct.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow no pid host", + "output": "allow no pid host config.toml: Whether to allow no --pid=host setting. Some GPU info from within docker will not be correct.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to allow no --pid=host setting. Some GPU info from within docker will not be correct.: ", + "output": "allow no pid host config.toml: Whether to allow no --pid=host setting. 
Some GPU info from within docker will not be correct.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_no_pid_host", + "output": "allow no pid host config.toml: Whether to allow no --pid=host setting. Some GPU info from within docker will not be correct.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allow_no_pid_host", + "output": "allow no pid host config.toml: Whether to allow no --pid=host setting. Some GPU info from within docker will not be correct.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does final_munging_memory_reduction_factor do? : final munging memory reduction factor config.toml: Reduce memory usage during final ensemble feature engineering (1 uses most memory, larger values use less memory)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain final_munging_memory_reduction_factor. : final munging memory reduction factor config.toml: Reduce memory usage during final ensemble feature engineering (1 uses most memory, larger values use less memory)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Factor to reduce estimated memory usage by: . 
: Set the final munging memory reduction factor config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "final_munging_memory_reduction_factor", + "output": "final munging memory reduction factor config.toml: Reduce memory usage during final ensemble feature engineering (1 uses most memory, larger values use less memory)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "final_munging_memory_reduction_factor", + "output": "final munging memory reduction factor config.toml: Factor to reduce estimated memory usage by: Reduce memory usage during final ensemble feature engineering (1 uses most memory, larger values use less memory)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "final munging memory reduction factor", + "output": "final munging memory reduction factor config.toml: Factor to reduce estimated memory usage by: Reduce memory usage during final ensemble feature engineering (1 uses most memory, larger values use less memory)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Factor to reduce estimated memory usage by: ", + "output": "final munging memory reduction factor config.toml: Factor to reduce estimated memory usage by: Reduce memory usage during final ensemble feature engineering (1 uses most memory, larger values use less memory)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting final_munging_memory_reduction_factor", + "output": "final munging memory reduction factor config.toml: Reduce memory usage during final ensemble feature engineering (1 uses most memory, larger values use less memory)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of 
the expert setting final_munging_memory_reduction_factor", + "output": "final munging memory reduction factor config.toml: Factor to reduce estimated memory usage by: Reduce memory usage during final ensemble feature engineering (1 uses most memory, larger values use less memory)" + }, + { + "prompt_type": "plain", + "instruction": ": What does munging_memory_overhead_factor do? : munging memory overhead factor config.toml: How much more memory a typical transformer needs than the input data. Can be increased if, e.g., final model munging uses too much memory due to parallel operations." + }, + { + "prompt_type": "plain", + "instruction": ": Explain munging_memory_overhead_factor. : munging memory overhead factor config.toml: How much more memory a typical transformer needs than the input data. Can be increased if, e.g., final model munging uses too much memory due to parallel operations." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Memory use per transformer per input data size: . : Set the munging memory overhead factor config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "munging_memory_overhead_factor", + "output": "munging memory overhead factor config.toml: How much more memory a typical transformer needs than the input data. Can be increased if, e.g., final model munging uses too much memory due to parallel operations." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "munging_memory_overhead_factor", + "output": "munging memory overhead factor config.toml: Memory use per transformer per input data size: How much more memory a typical transformer needs than the input data. Can be increased if, e.g., final model munging uses too much memory due to parallel operations." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "munging memory overhead factor", + "output": "munging memory overhead factor config.toml: Memory use per transformer per input data size: How much more memory a typical transformer needs than the input data. Can be increased if, e.g., final model munging uses too much memory due to parallel operations." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Memory use per transformer per input data size: ", + "output": "munging memory overhead factor config.toml: Memory use per transformer per input data size: How much more memory a typical transformer needs than the input data. Can be increased if, e.g., final model munging uses too much memory due to parallel operations." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting munging_memory_overhead_factor", + "output": "munging memory overhead factor config.toml: How much more memory a typical transformer needs than the input data. Can be increased if, e.g., final model munging uses too much memory due to parallel operations." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting munging_memory_overhead_factor", + "output": "munging memory overhead factor config.toml: Memory use per transformer per input data size: How much more memory a typical transformer needs than the input data. Can be increased if, e.g., final model munging uses too much memory due to parallel operations." + }, + { + "prompt_type": "plain", + "instruction": ": What does per_transformer_segfault_protection_ga do? : per transformer segfault protection ga config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during tuning and evolution. 
Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain per_transformer_segfault_protection_ga. : per transformer segfault protection ga config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during tuning and evolution. Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "per_transformer_segfault_protection_ga", + "output": "per transformer segfault protection ga config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during tuning and evolution. Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "per_transformer_segfault_protection_ga", + "output": "per transformer segfault protection ga config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during tuning and evolution. 
Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "per transformer segfault protection ga", + "output": "per transformer segfault protection ga config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during tuning and evolution. Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to have per-transformer segfault protection when munging data into transformed features during tuning and evolution. Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: ", + "output": "per transformer segfault protection ga config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during tuning and evolution. Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting per_transformer_segfault_protection_ga", + "output": "per transformer segfault protection ga config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during tuning and evolution. 
Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting per_transformer_segfault_protection_ga", + "output": "per transformer segfault protection ga config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during tuning and evolution. Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does per_transformer_segfault_protection_final do? : per transformer segfault protection final config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during final model fitting and scoring. Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain per_transformer_segfault_protection_final. : per transformer segfault protection final config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during final model fitting and scoring. Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "per_transformer_segfault_protection_final", + "output": "per transformer segfault protection final config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during final model fitting and scoring. 
Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "per_transformer_segfault_protection_final", + "output": "per transformer segfault protection final config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during final model fitting and scoring. Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "per transformer segfault protection final", + "output": "per transformer segfault protection final config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during final model fitting and scoring. Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to have per-transformer segfault protection when munging data into transformed features during final model fitting and scoring. Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: ", + "output": "per transformer segfault protection final config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during final model fitting and scoring. 
Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting per_transformer_segfault_protection_final", + "output": "per transformer segfault protection final config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during final model fitting and scoring. Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting per_transformer_segfault_protection_final", + "output": "per transformer segfault protection final config.toml: Whether to have per-transformer segfault protection when munging data into transformed features during final model fitting and scoring. Can lead to significant slowdown for cases when large data but data is sampled, leaving large objects in parent fork, leading to slow fork time for each transformer.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does submit_resource_wait_period do? : submit resource wait period config.toml: How often to check resources (disk, memory, cpu) to see if need to stall submission." + }, + { + "prompt_type": "plain", + "instruction": ": Explain submit_resource_wait_period. : submit resource wait period config.toml: How often to check resources (disk, memory, cpu) to see if need to stall submission." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "submit_resource_wait_period", + "output": "submit resource wait period config.toml: How often to check resources (disk, memory, cpu) to see if need to stall submission." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "submit_resource_wait_period", + "output": "submit resource wait period config.toml: How often to check resources (disk, memory, cpu) to see if need to stall submission." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "submit resource wait period", + "output": "submit resource wait period config.toml: How often to check resources (disk, memory, cpu) to see if need to stall submission." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "submit resource wait period config.toml: How often to check resources (disk, memory, cpu) to see if need to stall submission." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting submit_resource_wait_period", + "output": "submit resource wait period config.toml: How often to check resources (disk, memory, cpu) to see if need to stall submission." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting submit_resource_wait_period", + "output": "submit resource wait period config.toml: How often to check resources (disk, memory, cpu) to see if need to stall submission." + }, + { + "prompt_type": "plain", + "instruction": ": What does stall_subprocess_submission_cpu_threshold_pct do? : stall subprocess submission cpu threshold pct config.toml: Stall submission of subprocesses if system CPU usage is higher than this threshold in percent (set to 100 to disable). A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "plain", + "instruction": ": Explain stall_subprocess_submission_cpu_threshold_pct. 
: stall subprocess submission cpu threshold pct config.toml: Stall submission of subprocesses if system CPU usage is higher than this threshold in percent (set to 100 to disable). A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall_subprocess_submission_cpu_threshold_pct", + "output": "stall subprocess submission cpu threshold pct config.toml: Stall submission of subprocesses if system CPU usage is higher than this threshold in percent (set to 100 to disable). A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall_subprocess_submission_cpu_threshold_pct", + "output": "stall subprocess submission cpu threshold pct config.toml: Stall submission of subprocesses if system CPU usage is higher than this threshold in percent (set to 100 to disable). A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall subprocess submission cpu threshold pct", + "output": "stall subprocess submission cpu threshold pct config.toml: Stall submission of subprocesses if system CPU usage is higher than this threshold in percent (set to 100 to disable). A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "stall subprocess submission cpu threshold pct config.toml: Stall submission of subprocesses if system CPU usage is higher than this threshold in percent (set to 100 to disable). 
A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting stall_subprocess_submission_cpu_threshold_pct", + "output": "stall subprocess submission cpu threshold pct config.toml: Stall submission of subprocesses if system CPU usage is higher than this threshold in percent (set to 100 to disable). A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting stall_subprocess_submission_cpu_threshold_pct", + "output": "stall subprocess submission cpu threshold pct config.toml: Stall submission of subprocesses if system CPU usage is higher than this threshold in percent (set to 100 to disable). A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "plain", + "instruction": ": What does stall_subprocess_submission_dai_fork_threshold_pct do? : stall subprocess submission dai fork threshold pct config.toml: Restrict/Stall submission of subprocesses if DAI fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking. A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "plain", + "instruction": ": Explain stall_subprocess_submission_dai_fork_threshold_pct. : stall subprocess submission dai fork threshold pct config.toml: Restrict/Stall submission of subprocesses if DAI fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking. 
A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall_subprocess_submission_dai_fork_threshold_pct", + "output": "stall subprocess submission dai fork threshold pct config.toml: Restrict/Stall submission of subprocesses if DAI fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking. A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall_subprocess_submission_dai_fork_threshold_pct", + "output": "stall subprocess submission dai fork threshold pct config.toml: Restrict/Stall submission of subprocesses if DAI fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking. A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall subprocess submission dai fork threshold pct", + "output": "stall subprocess submission dai fork threshold pct config.toml: Restrict/Stall submission of subprocesses if DAI fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking. A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "stall subprocess submission dai fork threshold pct config.toml: Restrict/Stall submission of subprocesses if DAI fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking. 
A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting stall_subprocess_submission_dai_fork_threshold_pct", + "output": "stall subprocess submission dai fork threshold pct config.toml: Restrict/Stall submission of subprocesses if DAI fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking). A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting stall_subprocess_submission_dai_fork_threshold_pct", + "output": "stall subprocess submission dai fork threshold pct config.toml: Restrict/Stall submission of subprocesses if DAI fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking). A reasonable number is 90.0 if activated" + }, + { + "prompt_type": "plain", + "instruction": ": What does stall_subprocess_submission_experiment_fork_threshold_pct do? : stall subprocess submission experiment fork threshold pct config.toml: Restrict/Stall submission of subprocesses if experiment fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking). A reasonable number is 90.0 if activated. For small data leads to overhead of about 0.1s per task submitted due to checks, so for scoring can slow things down for tests." + }, + { + "prompt_type": "plain", + "instruction": ": Explain stall_subprocess_submission_experiment_fork_threshold_pct. 
: stall subprocess submission experiment fork threshold pct config.toml: Restrict/Stall submission of subprocesses if experiment fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking). A reasonable number is 90.0 if activated. For small data leads to overhead of about 0.1s per task submitted due to checks, so for scoring can slow things down for tests." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall_subprocess_submission_experiment_fork_threshold_pct", + "output": "stall subprocess submission experiment fork threshold pct config.toml: Restrict/Stall submission of subprocesses if experiment fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking). A reasonable number is 90.0 if activated. For small data leads to overhead of about 0.1s per task submitted due to checks, so for scoring can slow things down for tests." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall_subprocess_submission_experiment_fork_threshold_pct", + "output": "stall subprocess submission experiment fork threshold pct config.toml: Restrict/Stall submission of subprocesses if experiment fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking). A reasonable number is 90.0 if activated. For small data leads to overhead of about 0.1s per task submitted due to checks, so for scoring can slow things down for tests." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "stall subprocess submission experiment fork threshold pct", + "output": "stall subprocess submission experiment fork threshold pct config.toml: Restrict/Stall submission of subprocesses if experiment fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking). A reasonable number is 90.0 if activated. For small data leads to overhead of about 0.1s per task submitted due to checks, so for scoring can slow things down for tests." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "stall subprocess submission experiment fork threshold pct config.toml: Restrict/Stall submission of subprocesses if experiment fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking). A reasonable number is 90.0 if activated. For small data leads to overhead of about 0.1s per task submitted due to checks, so for scoring can slow things down for tests." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting stall_subprocess_submission_experiment_fork_threshold_pct", + "output": "stall subprocess submission experiment fork threshold pct config.toml: Restrict/Stall submission of subprocesses if experiment fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking). A reasonable number is 90.0 if activated. For small data leads to overhead of about 0.1s per task submitted due to checks, so for scoring can slow things down for tests." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting stall_subprocess_submission_experiment_fork_threshold_pct", + "output": "stall subprocess submission experiment fork threshold pct config.toml: Restrict/Stall submission of subprocesses if experiment fork count (across all experiments) per unit ulimit nproc soft limit is higher than this threshold in percent (set to -1 to disable, 0 for minimal forking). A reasonable number is 90.0 if activated. For small data leads to overhead of about 0.1s per task submitted due to checks, so for scoring can slow things down for tests." + }, + { + "prompt_type": "plain", + "instruction": ": What does restrict_initpool_by_memory do? : restrict initpool by memory config.toml: Whether to restrict pool workers even if not used, by reducing number of pool workers available. Good if really huge number of experiments, but otherwise, best to have all pool workers ready and only stall submission of tasks so can be dynamic to multi-experiment environment" + }, + { + "prompt_type": "plain", + "instruction": ": Explain restrict_initpool_by_memory. : restrict initpool by memory config.toml: Whether to restrict pool workers even if not used, by reducing number of pool workers available. Good if really huge number of experiments, but otherwise, best to have all pool workers ready and only stall submission of tasks so can be dynamic to multi-experiment environment" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "restrict_initpool_by_memory", + "output": "restrict initpool by memory config.toml: Whether to restrict pool workers even if not used, by reducing number of pool workers available. 
Good if really huge number of experiments, but otherwise, best to have all pool workers ready and only stall submission of tasks so can be dynamic to multi-experiment environment" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "restrict_initpool_by_memory", + "output": "restrict initpool by memory config.toml: Whether to restrict pool workers even if not used, by reducing number of pool workers available. Good if really huge number of experiments, but otherwise, best to have all pool workers ready and only stall submission of tasks so can be dynamic to multi-experiment environment" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "restrict initpool by memory", + "output": "restrict initpool by memory config.toml: Whether to restrict pool workers even if not used, by reducing number of pool workers available. Good if really huge number of experiments, but otherwise, best to have all pool workers ready and only stall submission of tasks so can be dynamic to multi-experiment environment" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "restrict initpool by memory config.toml: Whether to restrict pool workers even if not used, by reducing number of pool workers available. Good if really huge number of experiments, but otherwise, best to have all pool workers ready and only stall submission of tasks so can be dynamic to multi-experiment environment" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting restrict_initpool_by_memory", + "output": "restrict initpool by memory config.toml: Whether to restrict pool workers even if not used, by reducing number of pool workers available. 
Good if really huge number of experiments, but otherwise, best to have all pool workers ready and only stall submission of tasks so can be dynamic to multi-experiment environment" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting restrict_initpool_by_memory", + "output": "restrict initpool by memory config.toml: Whether to restrict pool workers even if not used, by reducing number of pool workers available. Good if really huge number of experiments, but otherwise, best to have all pool workers ready and only stall submission of tasks so can be dynamic to multi-experiment environment" + }, + { + "prompt_type": "plain", + "instruction": ": What does terminate_experiment_if_memory_low do? : terminate experiment if memory low config.toml: Whether to terminate experiments if the system memory available falls below memory_limit_gb_terminate" + }, + { + "prompt_type": "plain", + "instruction": ": Explain terminate_experiment_if_memory_low. 
: terminate experiment if memory low config.toml: Whether to terminate experiments if the system memory available falls below memory_limit_gb_terminate" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "terminate_experiment_if_memory_low", + "output": "terminate experiment if memory low config.toml: Whether to terminate experiments if the system memory available falls below memory_limit_gb_terminate" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "terminate_experiment_if_memory_low", + "output": "terminate experiment if memory low config.toml: Whether to terminate experiments if the system memory available falls below memory_limit_gb_terminate" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "terminate experiment if memory low", + "output": "terminate experiment if memory low config.toml: Whether to terminate experiments if the system memory available falls below memory_limit_gb_terminate" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "terminate experiment if memory low config.toml: Whether to terminate experiments if the system memory available falls below memory_limit_gb_terminate" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting terminate_experiment_if_memory_low", + "output": "terminate experiment if memory low config.toml: Whether to terminate experiments if the system memory available falls below memory_limit_gb_terminate" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting terminate_experiment_if_memory_low", + "output": "terminate experiment if memory low config.toml: Whether to terminate experiments if the system memory 
available falls below memory_limit_gb_terminate" + }, + { + "prompt_type": "plain", + "instruction": ": What does memory_limit_gb_terminate do? : memory limit gb terminate config.toml: Memory in GB beyond which will terminate experiment if terminate_experiment_if_memory_low=true." + }, + { + "prompt_type": "plain", + "instruction": ": Explain memory_limit_gb_terminate. : memory limit gb terminate config.toml: Memory in GB beyond which will terminate experiment if terminate_experiment_if_memory_low=true." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "memory_limit_gb_terminate", + "output": "memory limit gb terminate config.toml: Memory in GB beyond which will terminate experiment if terminate_experiment_if_memory_low=true." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "memory_limit_gb_terminate", + "output": "memory limit gb terminate config.toml: Memory in GB beyond which will terminate experiment if terminate_experiment_if_memory_low=true." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "memory limit gb terminate", + "output": "memory limit gb terminate config.toml: Memory in GB beyond which will terminate experiment if terminate_experiment_if_memory_low=true." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "memory limit gb terminate config.toml: Memory in GB beyond which will terminate experiment if terminate_experiment_if_memory_low=true." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting memory_limit_gb_terminate", + "output": "memory limit gb terminate config.toml: Memory in GB beyond which will terminate experiment if terminate_experiment_if_memory_low=true." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting memory_limit_gb_terminate", + "output": "memory limit gb terminate config.toml: Memory in GB beyond which will terminate experiment if terminate_experiment_if_memory_low=true." + }, + { + "prompt_type": "plain", + "instruction": ": What does scoring_data_directory do? : scoring data directory config.toml: Path to use for scoring directory path relative to run path" + }, + { + "prompt_type": "plain", + "instruction": ": Explain scoring_data_directory. : scoring data directory config.toml: Path to use for scoring directory path relative to run path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "scoring_data_directory", + "output": "scoring data directory config.toml: Path to use for scoring directory path relative to run path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "scoring_data_directory", + "output": "scoring data directory config.toml: Path to use for scoring directory path relative to run path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "scoring data directory", + "output": "scoring data directory config.toml: Path to use for scoring directory path relative to run path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "scoring data directory config.toml: Path to use for scoring directory path relative to run path" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting scoring_data_directory", + "output": "scoring data directory config.toml: Path to use for scoring directory path relative to run path" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a 
detailed explanation of the expert setting scoring_data_directory", + "output": "scoring data directory config.toml: Path to use for scoring directory path relative to run path" + }, + { + "prompt_type": "plain", + "instruction": ": What does last_exclusive_mode do? : last exclusive mode config.toml: Internal helper to allow memory of if changed exclusive mode" + }, + { + "prompt_type": "plain", + "instruction": ": Explain last_exclusive_mode. : last exclusive mode config.toml: Internal helper to allow memory of if changed exclusive mode" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "last_exclusive_mode", + "output": "last exclusive mode config.toml: Internal helper to allow memory of if changed exclusive mode" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "last_exclusive_mode", + "output": "last exclusive mode config.toml: Internal helper to allow memory of if changed exclusive mode" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "last exclusive mode", + "output": "last exclusive mode config.toml: Internal helper to allow memory of if changed exclusive mode" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "last exclusive mode config.toml: Internal helper to allow memory of if changed exclusive mode" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting last_exclusive_mode", + "output": "last exclusive mode config.toml: Internal helper to allow memory of if changed exclusive mode" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting last_exclusive_mode", + "output": "last exclusive mode config.toml: Internal helper to allow memory of 
if changed exclusive mode" + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_acceptance_test_mojo_types do? : mojo acceptance test mojo types config.toml: Which MOJO runtimes should be tested as part of the mini acceptance tests" + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_acceptance_test_mojo_types. : mojo acceptance test mojo types config.toml: Which MOJO runtimes should be tested as part of the mini acceptance tests" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: MOJO types to test at end of experiment: . : Set the mojo acceptance test mojo types config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_acceptance_test_mojo_types", + "output": "mojo acceptance test mojo types config.toml: Which MOJO runtimes should be tested as part of the mini acceptance tests" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_acceptance_test_mojo_types", + "output": "mojo acceptance test mojo types config.toml: MOJO types to test at end of experiment: Which MOJO runtimes should be tested as part of the mini acceptance tests" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo acceptance test mojo types", + "output": "mojo acceptance test mojo types config.toml: MOJO types to test at end of experiment: Which MOJO runtimes should be tested as part of the mini acceptance tests" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "MOJO types to test at end of experiment: ", + "output": "mojo acceptance test mojo types config.toml: MOJO types to test at end of experiment: Which MOJO runtimes should be tested as part of the mini acceptance tests" + }, + { + "prompt_type": "human_bot", + 
"instruction": "Provide a short explanation of the expert setting mojo_acceptance_test_mojo_types", + "output": "mojo acceptance test mojo types config.toml: Which MOJO runtimes should be tested as part of the mini acceptance tests" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_acceptance_test_mojo_types", + "output": "mojo acceptance test mojo types config.toml: MOJO types to test at end of experiment: Which MOJO runtimes should be tested as part of the mini acceptance tests" + }, + { + "prompt_type": "plain", + "instruction": ": What does make_mojo_scoring_pipeline_for_features_only do? : make mojo scoring pipeline for features only config.toml: Create MOJO for feature engineering pipeline only (no predictions)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain make_mojo_scoring_pipeline_for_features_only. : make mojo scoring pipeline for features only config.toml: Create MOJO for feature engineering pipeline only (no predictions)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Create MOJO for feature engineering pipeline only (no predictions): . 
: Set the make mojo scoring pipeline for features only config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_mojo_scoring_pipeline_for_features_only", + "output": "make mojo scoring pipeline for features only config.toml: Create MOJO for feature engineering pipeline only (no predictions)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make_mojo_scoring_pipeline_for_features_only", + "output": "make mojo scoring pipeline for features only config.toml: Create MOJO for feature engineering pipeline only (no predictions): Create MOJO for feature engineering pipeline only (no predictions)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "make mojo scoring pipeline for features only", + "output": "make mojo scoring pipeline for features only config.toml: Create MOJO for feature engineering pipeline only (no predictions): Create MOJO for feature engineering pipeline only (no predictions)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Create MOJO for feature engineering pipeline only (no predictions): ", + "output": "make mojo scoring pipeline for features only config.toml: Create MOJO for feature engineering pipeline only (no predictions): Create MOJO for feature engineering pipeline only (no predictions)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting make_mojo_scoring_pipeline_for_features_only", + "output": "make mojo scoring pipeline for features only config.toml: Create MOJO for feature engineering pipeline only (no predictions)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting make_mojo_scoring_pipeline_for_features_only", + 
"output": "make mojo scoring pipeline for features only config.toml: Create MOJO for feature engineering pipeline only (no predictions): Create MOJO for feature engineering pipeline only (no predictions)" + }, + { + "prompt_type": "plain", + "instruction": ": What does mojo_replace_target_encoding_with_grouped_input_cols do? : mojo replace target encoding with grouped input cols config.toml: Replaces target encoding features by their input columns. Instead of CVTE_Age:Income:Zip, this will create Age:Income:Zip. Only when make_mojo_scoring_pipeline_for_features_only is enabled." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mojo_replace_target_encoding_with_grouped_input_cols. : mojo replace target encoding with grouped input cols config.toml: Replaces target encoding features by their input columns. Instead of CVTE_Age:Income:Zip, this will create Age:Income:Zip. Only when make_mojo_scoring_pipeline_for_features_only is enabled." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Replaces target encoding features with concatenated input features.: . : Set the mojo replace target encoding with grouped input cols config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_replace_target_encoding_with_grouped_input_cols", + "output": "mojo replace target encoding with grouped input cols config.toml: Replaces target encoding features by their input columns. Instead of CVTE_Age:Income:Zip, this will create Age:Income:Zip. Only when make_mojo_scoring_pipeline_for_features_only is enabled." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo_replace_target_encoding_with_grouped_input_cols", + "output": "mojo replace target encoding with grouped input cols config.toml: Replaces target encoding features with concatenated input features.: Replaces target encoding features by their input columns. Instead of CVTE_Age:Income:Zip, this will create Age:Income:Zip. Only when make_mojo_scoring_pipeline_for_features_only is enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mojo replace target encoding with grouped input cols", + "output": "mojo replace target encoding with grouped input cols config.toml: Replaces target encoding features with concatenated input features.: Replaces target encoding features by their input columns. Instead of CVTE_Age:Income:Zip, this will create Age:Income:Zip. Only when make_mojo_scoring_pipeline_for_features_only is enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Replaces target encoding features with concatenated input features.: ", + "output": "mojo replace target encoding with grouped input cols config.toml: Replaces target encoding features with concatenated input features.: Replaces target encoding features by their input columns. Instead of CVTE_Age:Income:Zip, this will create Age:Income:Zip. Only when make_mojo_scoring_pipeline_for_features_only is enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mojo_replace_target_encoding_with_grouped_input_cols", + "output": "mojo replace target encoding with grouped input cols config.toml: Replaces target encoding features by their input columns. Instead of CVTE_Age:Income:Zip, this will create Age:Income:Zip. 
Only when make_mojo_scoring_pipeline_for_features_only is enabled." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mojo_replace_target_encoding_with_grouped_input_cols", + "output": "mojo replace target encoding with grouped input cols config.toml: Replaces target encoding features with concatenated input features.: Replaces target encoding features by their input columns. Instead of CVTE_Age:Income:Zip, this will create Age:Income:Zip. Only when make_mojo_scoring_pipeline_for_features_only is enabled." + }, + { + "prompt_type": "plain", + "instruction": ": What does predictions_as_transform_only do? : predictions as transform only config.toml: Use pipeline to generate transformed features, when making predictions, bypassing the model that usually converts transformed features into predictions." + }, + { + "prompt_type": "plain", + "instruction": ": Explain predictions_as_transform_only. : predictions as transform only config.toml: Use pipeline to generate transformed features, when making predictions, bypassing the model that usually converts transformed features into predictions." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Generate transformation when making predictions: . : Set the predictions as transform only config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "predictions_as_transform_only", + "output": "predictions as transform only config.toml: Use pipeline to generate transformed features, when making predictions, bypassing the model that usually converts transformed features into predictions." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "predictions_as_transform_only", + "output": "predictions as transform only config.toml: Generate transformation when making predictions: Use pipeline to generate transformed features, when making predictions, bypassing the model that usually converts transformed features into predictions." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "predictions as transform only", + "output": "predictions as transform only config.toml: Generate transformation when making predictions: Use pipeline to generate transformed features, when making predictions, bypassing the model that usually converts transformed features into predictions." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Generate transformation when making predictions: ", + "output": "predictions as transform only config.toml: Generate transformation when making predictions: Use pipeline to generate transformed features, when making predictions, bypassing the model that usually converts transformed features into predictions." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting predictions_as_transform_only", + "output": "predictions as transform only config.toml: Use pipeline to generate transformed features, when making predictions, bypassing the model that usually converts transformed features into predictions." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting predictions_as_transform_only", + "output": "predictions as transform only config.toml: Generate transformation when making predictions: Use pipeline to generate transformed features, when making predictions, bypassing the model that usually converts transformed features into predictions." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_single_instance_db_access do? : enable single instance db access config.toml: If set to true, will make sure only current instance can access its database" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_single_instance_db_access. : enable single instance db access config.toml: If set to true, will make sure only current instance can access its database" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_single_instance_db_access", + "output": "enable single instance db access config.toml: If set to true, will make sure only current instance can access its database" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_single_instance_db_access", + "output": "enable single instance db access config.toml: If set to true, will make sure only current instance can access its database" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable single instance db access", + "output": "enable single instance db access config.toml: If set to true, will make sure only current instance can access its database" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable single instance db access config.toml: If set to true, will make sure only current instance can access 
its database" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_single_instance_db_access", + "output": "enable single instance db access config.toml: If set to true, will make sure only current instance can access its database" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_single_instance_db_access", + "output": "enable single instance db access config.toml: If set to true, will make sure only current instance can access its database" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_pytorch_nlp do? : enable pytorch nlp config.toml: Deprecated - maps to enable_pytorch_nlp_transformer and enable_pytorch_nlp_model in 1.10.2+" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_pytorch_nlp. : enable pytorch nlp config.toml: Deprecated - maps to enable_pytorch_nlp_transformer and enable_pytorch_nlp_model in 1.10.2+" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_pytorch_nlp", + "output": "enable pytorch nlp config.toml: Deprecated - maps to enable_pytorch_nlp_transformer and enable_pytorch_nlp_model in 1.10.2+" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_pytorch_nlp", + "output": "enable pytorch nlp config.toml: Deprecated - maps to enable_pytorch_nlp_transformer and enable_pytorch_nlp_model in 1.10.2+" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable pytorch nlp", + "output": "enable pytorch nlp config.toml: Deprecated - maps to enable_pytorch_nlp_transformer and enable_pytorch_nlp_model in 1.10.2+" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": 
"enable pytorch nlp config.toml: Deprecated - maps to enable_pytorch_nlp_transformer and enable_pytorch_nlp_model in 1.10.2+" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_pytorch_nlp", + "output": "enable pytorch nlp config.toml: Deprecated - maps to enable_pytorch_nlp_transformer and enable_pytorch_nlp_model in 1.10.2+" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_pytorch_nlp", + "output": "enable pytorch nlp config.toml: Deprecated - maps to enable_pytorch_nlp_transformer and enable_pytorch_nlp_model in 1.10.2+" + }, + { + "prompt_type": "plain", + "instruction": ": What does check_timeout_per_gpu do? : check timeout per gpu config.toml: How long to wait per GPU for tensorflow/torch to run during system checks." + }, + { + "prompt_type": "plain", + "instruction": ": Explain check_timeout_per_gpu. : check timeout per gpu config.toml: How long to wait per GPU for tensorflow/torch to run during system checks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_timeout_per_gpu", + "output": "check timeout per gpu config.toml: How long to wait per GPU for tensorflow/torch to run during system checks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check_timeout_per_gpu", + "output": "check timeout per gpu config.toml: How long to wait per GPU for tensorflow/torch to run during system checks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "check timeout per gpu", + "output": "check timeout per gpu config.toml: How long to wait per GPU for tensorflow/torch to run during system checks." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "check timeout per gpu config.toml: How long to wait per GPU for tensorflow/torch to run during system checks." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting check_timeout_per_gpu", + "output": "check timeout per gpu config.toml: How long to wait per GPU for tensorflow/torch to run during system checks." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting check_timeout_per_gpu", + "output": "check timeout per gpu config.toml: How long to wait per GPU for tensorflow/torch to run during system checks." + }, + { + "prompt_type": "plain", + "instruction": ": What does gpu_exit_if_fails do? : gpu exit if fails config.toml: Whether to fail start-up if cannot successfully run GPU checks" + }, + { + "prompt_type": "plain", + "instruction": ": Explain gpu_exit_if_fails. 
: gpu exit if fails config.toml: Whether to fail start-up if cannot successfully run GPU checks" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu_exit_if_fails", + "output": "gpu exit if fails config.toml: Whether to fail start-up if cannot successfully run GPU checks" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu_exit_if_fails", + "output": "gpu exit if fails config.toml: Whether to fail start-up if cannot successfully run GPU checks" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "gpu exit if fails", + "output": "gpu exit if fails config.toml: Whether to fail start-up if cannot successfully run GPU checks" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "gpu exit if fails config.toml: Whether to fail start-up if cannot successfully run GPU checks" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting gpu_exit_if_fails", + "output": "gpu exit if fails config.toml: Whether to fail start-up if cannot successfully run GPU checks" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting gpu_exit_if_fails", + "output": "gpu exit if fails config.toml: Whether to fail start-up if cannot successfully run GPU checks" + }, + { + "prompt_type": "plain", + "instruction": ": What does time_series_recipe do? : time series recipe config.toml: Enable time series lag-based recipe with lag transformers. If disabled, the same train-test gap and periods are used, but no lag transformers are enabled. 
If disabled, the set of feature transformations is quite limited without lag transformers, so consider setting enable_time_unaware_transformers to true in order to treat the problem as more like an IID type problem." + }, + { + "prompt_type": "plain", + "instruction": ": Explain time_series_recipe. : time series recipe config.toml: Enable time series lag-based recipe with lag transformers. If disabled, the same train-test gap and periods are used, but no lag transformers are enabled. If disabled, the set of feature transformations is quite limited without lag transformers, so consider setting enable_time_unaware_transformers to true in order to treat the problem as more like an IID type problem." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Time-series lag-based recipe: . : Set the time series recipe config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_recipe", + "output": "time series recipe config.toml: Enable time series lag-based recipe with lag transformers. If disabled, the same train-test gap and periods are used, but no lag transformers are enabled. If disabled, the set of feature transformations is quite limited without lag transformers, so consider setting enable_time_unaware_transformers to true in order to treat the problem as more like an IID type problem." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_recipe", + "output": "time series recipe config.toml: Time-series lag-based recipe: Enable time series lag-based recipe with lag transformers. If disabled, the same train-test gap and periods are used, but no lag transformers are enabled. 
If disabled, the set of feature transformations is quite limited without lag transformers, so consider setting enable_time_unaware_transformers to true in order to treat the problem as more like an IID type problem." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time series recipe", + "output": "time series recipe config.toml: Time-series lag-based recipe: Enable time series lag-based recipe with lag transformers. If disabled, the same train-test gap and periods are used, but no lag transformers are enabled. If disabled, the set of feature transformations is quite limited without lag transformers, so consider setting enable_time_unaware_transformers to true in order to treat the problem as more like an IID type problem." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Time-series lag-based recipe: ", + "output": "time series recipe config.toml: Time-series lag-based recipe: Enable time series lag-based recipe with lag transformers. If disabled, the same train-test gap and periods are used, but no lag transformers are enabled. If disabled, the set of feature transformations is quite limited without lag transformers, so consider setting enable_time_unaware_transformers to true in order to treat the problem as more like an IID type problem." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting time_series_recipe", + "output": "time series recipe config.toml: Enable time series lag-based recipe with lag transformers. If disabled, the same train-test gap and periods are used, but no lag transformers are enabled. If disabled, the set of feature transformations is quite limited without lag transformers, so consider setting enable_time_unaware_transformers to true in order to treat the problem as more like an IID type problem." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting time_series_recipe", + "output": "time series recipe config.toml: Time-series lag-based recipe: Enable time series lag-based recipe with lag transformers. If disabled, the same train-test gap and periods are used, but no lag transformers are enabled. If disabled, the set of feature transformations is quite limited without lag transformers, so consider setting enable_time_unaware_transformers to true in order to treat the problem as more like an IID type problem." + }, + { + "prompt_type": "plain", + "instruction": ": What does time_series_causal_split_recipe do? : time series causal split recipe config.toml: Whether causal splits are used when time_series_recipe is false orwhether to use same train-gap-test splits when lag transformers are disabled (default behavior).For train-test gap, period, etc. to be used when lag-based recipe is disabled, this must be false." + }, + { + "prompt_type": "plain", + "instruction": ": Explain time_series_causal_split_recipe. : time series causal split recipe config.toml: Whether causal splits are used when time_series_recipe is false orwhether to use same train-gap-test splits when lag transformers are disabled (default behavior).For train-test gap, period, etc. to be used when lag-based recipe is disabled, this must be false." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether causal recipe is used for non-lag-based recipe: . : Set the time series causal split recipe config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_causal_split_recipe", + "output": "time series causal split recipe config.toml: Whether causal splits are used when time_series_recipe is false orwhether to use same train-gap-test splits when lag transformers are disabled (default behavior).For train-test gap, period, etc. 
to be used when lag-based recipe is disabled, this must be false." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_causal_split_recipe", + "output": "time series causal split recipe config.toml: Whether causal recipe is used for non-lag-based recipe: Whether causal splits are used when time_series_recipe is false or whether to use same train-gap-test splits when lag transformers are disabled (default behavior). For train-test gap, period, etc. to be used when lag-based recipe is disabled, this must be false." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time series causal split recipe", + "output": "time series causal split recipe config.toml: Whether causal recipe is used for non-lag-based recipe: Whether causal splits are used when time_series_recipe is false or whether to use same train-gap-test splits when lag transformers are disabled (default behavior). For train-test gap, period, etc. to be used when lag-based recipe is disabled, this must be false." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether causal recipe is used for non-lag-based recipe: ", + "output": "time series causal split recipe config.toml: Whether causal recipe is used for non-lag-based recipe: Whether causal splits are used when time_series_recipe is false or whether to use same train-gap-test splits when lag transformers are disabled (default behavior). For train-test gap, period, etc. to be used when lag-based recipe is disabled, this must be false." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting time_series_causal_split_recipe", + "output": "time series causal split recipe config.toml: Whether causal splits are used when time_series_recipe is false orwhether to use same train-gap-test splits when lag transformers are disabled (default behavior).For train-test gap, period, etc. to be used when lag-based recipe is disabled, this must be false." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting time_series_causal_split_recipe", + "output": "time series causal split recipe config.toml: Whether causal recipe is used for non-lag-based recipe: Whether causal splits are used when time_series_recipe is false orwhether to use same train-gap-test splits when lag transformers are disabled (default behavior).For train-test gap, period, etc. to be used when lag-based recipe is disabled, this must be false." + }, + { + "prompt_type": "plain", + "instruction": ": What does use_lags_if_causal_recipe do? : use lags if causal recipe config.toml: Whether to use lag transformers when using causal-split for validation (as occurs when not using time-based lag recipe). If no time groups columns, lag transformers will still use time-column as sole time group column. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain use_lags_if_causal_recipe. : use lags if causal recipe config.toml: Whether to use lag transformers when using causal-split for validation (as occurs when not using time-based lag recipe). If no time groups columns, lag transformers will still use time-column as sole time group column. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Use lag transformers when using causal time-series recipe: . 
: Set the use lags if causal recipe config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_lags_if_causal_recipe", + "output": "use lags if causal recipe config.toml: Whether to use lag transformers when using causal-split for validation (as occurs when not using time-based lag recipe). If no time groups columns, lag transformers will still use time-column as sole time group column. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_lags_if_causal_recipe", + "output": "use lags if causal recipe config.toml: Use lag transformers when using causal time-series recipe: Whether to use lag transformers when using causal-split for validation (as occurs when not using time-based lag recipe). If no time groups columns, lag transformers will still use time-column as sole time group column. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use lags if causal recipe", + "output": "use lags if causal recipe config.toml: Use lag transformers when using causal time-series recipe: Whether to use lag transformers when using causal-split for validation (as occurs when not using time-based lag recipe). If no time groups columns, lag transformers will still use time-column as sole time group column. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Use lag transformers when using causal time-series recipe: ", + "output": "use lags if causal recipe config.toml: Use lag transformers when using causal time-series recipe: Whether to use lag transformers when using causal-split for validation (as occurs when not using time-based lag recipe). If no time groups columns, lag transformers will still use time-column as sole time group column. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting use_lags_if_causal_recipe", + "output": "use lags if causal recipe config.toml: Whether to use lag transformers when using causal-split for validation (as occurs when not using time-based lag recipe). If no time groups columns, lag transformers will still use time-column as sole time group column. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting use_lags_if_causal_recipe", + "output": "use lags if causal recipe config.toml: Use lag transformers when using causal time-series recipe: Whether to use lag transformers when using causal-split for validation (as occurs when not using time-based lag recipe). If no time groups columns, lag transformers will still use time-column as sole time group column. " + }, + { + "prompt_type": "plain", + "instruction": ": What does time_series_leaderboard_mode do? : time series leaderboard mode config.toml: 'diverse': explore a diverse set of models built using various expert settings. Note that it's possible to rerun another such diverse leaderboard on top of the best-performing model(s), which will effectively help you compose these expert settings.'sliding_window': If the forecast horizon is N periods, create a separate model for each of the (gap, horizon) pairs of (0,n), (n,n), (2*n,n), ..., (2*N-1, n) in units of time periods.The number of periods to predict per model n is controlled by the expert setting 'time_series_leaderboard_periods_per_model', which defaults to 1." + }, + { + "prompt_type": "plain", + "instruction": ": Explain time_series_leaderboard_mode. : time series leaderboard mode config.toml: 'diverse': explore a diverse set of models built using various expert settings. 
Note that it's possible to rerun another such diverse leaderboard on top of the best-performing model(s), which will effectively help you compose these expert settings.'sliding_window': If the forecast horizon is N periods, create a separate model for each of the (gap, horizon) pairs of (0,n), (n,n), (2*n,n), ..., (2*N-1, n) in units of time periods.The number of periods to predict per model n is controlled by the expert setting 'time_series_leaderboard_periods_per_model', which defaults to 1." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Control the automatic time-series leaderboard mode: . : Set the time series leaderboard mode config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_leaderboard_mode", + "output": "time series leaderboard mode config.toml: 'diverse': explore a diverse set of models built using various expert settings. Note that it's possible to rerun another such diverse leaderboard on top of the best-performing model(s), which will effectively help you compose these expert settings.'sliding_window': If the forecast horizon is N periods, create a separate model for each of the (gap, horizon) pairs of (0,n), (n,n), (2*n,n), ..., (2*N-1, n) in units of time periods.The number of periods to predict per model n is controlled by the expert setting 'time_series_leaderboard_periods_per_model', which defaults to 1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_leaderboard_mode", + "output": "time series leaderboard mode config.toml: Control the automatic time-series leaderboard mode: 'diverse': explore a diverse set of models built using various expert settings. 
Note that it's possible to rerun another such diverse leaderboard on top of the best-performing model(s), which will effectively help you compose these expert settings.'sliding_window': If the forecast horizon is N periods, create a separate model for each of the (gap, horizon) pairs of (0,n), (n,n), (2*n,n), ..., (2*N-1, n) in units of time periods.The number of periods to predict per model n is controlled by the expert setting 'time_series_leaderboard_periods_per_model', which defaults to 1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time series leaderboard mode", + "output": "time series leaderboard mode config.toml: Control the automatic time-series leaderboard mode: 'diverse': explore a diverse set of models built using various expert settings. Note that it's possible to rerun another such diverse leaderboard on top of the best-performing model(s), which will effectively help you compose these expert settings.'sliding_window': If the forecast horizon is N periods, create a separate model for each of the (gap, horizon) pairs of (0,n), (n,n), (2*n,n), ..., (2*N-1, n) in units of time periods.The number of periods to predict per model n is controlled by the expert setting 'time_series_leaderboard_periods_per_model', which defaults to 1." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Control the automatic time-series leaderboard mode: ", + "output": "time series leaderboard mode config.toml: Control the automatic time-series leaderboard mode: 'diverse': explore a diverse set of models built using various expert settings. 
Note that it's possible to rerun another such diverse leaderboard on top of the best-performing model(s), which will effectively help you compose these expert settings.'sliding_window': If the forecast horizon is N periods, create a separate model for each of the (gap, horizon) pairs of (0,n), (n,n), (2*n,n), ..., (2*N-1, n) in units of time periods.The number of periods to predict per model n is controlled by the expert setting 'time_series_leaderboard_periods_per_model', which defaults to 1." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting time_series_leaderboard_mode", + "output": "time series leaderboard mode config.toml: 'diverse': explore a diverse set of models built using various expert settings. Note that it's possible to rerun another such diverse leaderboard on top of the best-performing model(s), which will effectively help you compose these expert settings.'sliding_window': If the forecast horizon is N periods, create a separate model for each of the (gap, horizon) pairs of (0,n), (n,n), (2*n,n), ..., (2*N-1, n) in units of time periods.The number of periods to predict per model n is controlled by the expert setting 'time_series_leaderboard_periods_per_model', which defaults to 1." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting time_series_leaderboard_mode", + "output": "time series leaderboard mode config.toml: Control the automatic time-series leaderboard mode: 'diverse': explore a diverse set of models built using various expert settings. 
Note that it's possible to rerun another such diverse leaderboard on top of the best-performing model(s), which will effectively help you compose these expert settings.'sliding_window': If the forecast horizon is N periods, create a separate model for each of the (gap, horizon) pairs of (0,n), (n,n), (2*n,n), ..., (2*N-1, n) in units of time periods.The number of periods to predict per model n is controlled by the expert setting 'time_series_leaderboard_periods_per_model', which defaults to 1." + }, + { + "prompt_type": "plain", + "instruction": ": What does time_series_leaderboard_periods_per_model do? : time series leaderboard periods per model config.toml: Fine-control to limit the number of models built in the 'sliding_window' mode. Larger values lead to fewer models." + }, + { + "prompt_type": "plain", + "instruction": ": Explain time_series_leaderboard_periods_per_model. : time series leaderboard periods per model config.toml: Fine-control to limit the number of models built in the 'sliding_window' mode. Larger values lead to fewer models." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of periods per model if time_series_leaderboard_mode is 'sliding_window'.: . : Set the time series leaderboard periods per model config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_leaderboard_periods_per_model", + "output": "time series leaderboard periods per model config.toml: Fine-control to limit the number of models built in the 'sliding_window' mode. Larger values lead to fewer models." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_leaderboard_periods_per_model", + "output": "time series leaderboard periods per model config.toml: Number of periods per model if time_series_leaderboard_mode is 'sliding_window'.: Fine-control to limit the number of models built in the 'sliding_window' mode. Larger values lead to fewer models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time series leaderboard periods per model", + "output": "time series leaderboard periods per model config.toml: Number of periods per model if time_series_leaderboard_mode is 'sliding_window'.: Fine-control to limit the number of models built in the 'sliding_window' mode. Larger values lead to fewer models." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of periods per model if time_series_leaderboard_mode is 'sliding_window'.: ", + "output": "time series leaderboard periods per model config.toml: Number of periods per model if time_series_leaderboard_mode is 'sliding_window'.: Fine-control to limit the number of models built in the 'sliding_window' mode. Larger values lead to fewer models." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting time_series_leaderboard_periods_per_model", + "output": "time series leaderboard periods per model config.toml: Fine-control to limit the number of models built in the 'sliding_window' mode. Larger values lead to fewer models." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting time_series_leaderboard_periods_per_model", + "output": "time series leaderboard periods per model config.toml: Number of periods per model if time_series_leaderboard_mode is 'sliding_window'.: Fine-control to limit the number of models built in the 'sliding_window' mode. Larger values lead to fewer models." + }, + { + "prompt_type": "plain", + "instruction": ": What does time_series_merge_splits do? : time series merge splits config.toml: Whether to create larger validation splits that are not bound to the length of the forecast horizon." + }, + { + "prompt_type": "plain", + "instruction": ": Explain time_series_merge_splits. : time series merge splits config.toml: Whether to create larger validation splits that are not bound to the length of the forecast horizon." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Larger validation splits for lag-based recipe: . : Set the time series merge splits config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_merge_splits", + "output": "time series merge splits config.toml: Whether to create larger validation splits that are not bound to the length of the forecast horizon." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_merge_splits", + "output": "time series merge splits config.toml: Larger validation splits for lag-based recipe: Whether to create larger validation splits that are not bound to the length of the forecast horizon." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time series merge splits", + "output": "time series merge splits config.toml: Larger validation splits for lag-based recipe: Whether to create larger validation splits that are not bound to the length of the forecast horizon." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Larger validation splits for lag-based recipe: ", + "output": "time series merge splits config.toml: Larger validation splits for lag-based recipe: Whether to create larger validation splits that are not bound to the length of the forecast horizon." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting time_series_merge_splits", + "output": "time series merge splits config.toml: Whether to create larger validation splits that are not bound to the length of the forecast horizon." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting time_series_merge_splits", + "output": "time series merge splits config.toml: Larger validation splits for lag-based recipe: Whether to create larger validation splits that are not bound to the length of the forecast horizon." + }, + { + "prompt_type": "plain", + "instruction": ": What does merge_splits_max_valid_ratio do? : merge splits max valid ratio config.toml: Maximum ratio of training data samples used for validation across splits when larger validation splits are created." + }, + { + "prompt_type": "plain", + "instruction": ": Explain merge_splits_max_valid_ratio. : merge splits max valid ratio config.toml: Maximum ratio of training data samples used for validation across splits when larger validation splits are created." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum ratio of training data samples used for validation (-1 = auto): . : Set the merge splits max valid ratio config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "merge_splits_max_valid_ratio", + "output": "merge splits max valid ratio config.toml: Maximum ratio of training data samples used for validation across splits when larger validation splits are created." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "merge_splits_max_valid_ratio", + "output": "merge splits max valid ratio config.toml: Maximum ratio of training data samples used for validation (-1 = auto): Maximum ratio of training data samples used for validation across splits when larger validation splits are created." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "merge splits max valid ratio", + "output": "merge splits max valid ratio config.toml: Maximum ratio of training data samples used for validation (-1 = auto): Maximum ratio of training data samples used for validation across splits when larger validation splits are created." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum ratio of training data samples used for validation (-1 = auto): ", + "output": "merge splits max valid ratio config.toml: Maximum ratio of training data samples used for validation (-1 = auto): Maximum ratio of training data samples used for validation across splits when larger validation splits are created." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting merge_splits_max_valid_ratio", + "output": "merge splits max valid ratio config.toml: Maximum ratio of training data samples used for validation across splits when larger validation splits are created." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting merge_splits_max_valid_ratio", + "output": "merge splits max valid ratio config.toml: Maximum ratio of training data samples used for validation (-1 = auto): Maximum ratio of training data samples used for validation across splits when larger validation splits are created." + }, + { + "prompt_type": "plain", + "instruction": ": What does fixed_size_train_timespan do? : fixed size train timespan config.toml: Whether to keep a fixed-size train timespan across time-based splits. That leads to roughly the same amount of train samples in every split. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain fixed_size_train_timespan. : fixed size train timespan config.toml: Whether to keep a fixed-size train timespan across time-based splits. That leads to roughly the same amount of train samples in every split. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Fixed-size train timespan across splits: . : Set the fixed size train timespan config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_size_train_timespan", + "output": "fixed size train timespan config.toml: Whether to keep a fixed-size train timespan across time-based splits. That leads to roughly the same amount of train samples in every split. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed_size_train_timespan", + "output": "fixed size train timespan config.toml: Fixed-size train timespan across splits: Whether to keep a fixed-size train timespan across time-based splits. That leads to roughly the same amount of train samples in every split. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fixed size train timespan", + "output": "fixed size train timespan config.toml: Fixed-size train timespan across splits: Whether to keep a fixed-size train timespan across time-based splits. That leads to roughly the same amount of train samples in every split. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Fixed-size train timespan across splits: ", + "output": "fixed size train timespan config.toml: Fixed-size train timespan across splits: Whether to keep a fixed-size train timespan across time-based splits. That leads to roughly the same amount of train samples in every split. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fixed_size_train_timespan", + "output": "fixed size train timespan config.toml: Whether to keep a fixed-size train timespan across time-based splits. That leads to roughly the same amount of train samples in every split. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fixed_size_train_timespan", + "output": "fixed size train timespan config.toml: Fixed-size train timespan across splits: Whether to keep a fixed-size train timespan across time-based splits. That leads to roughly the same amount of train samples in every split. 
" + }, + { + "prompt_type": "plain", + "instruction": ": What does time_series_validation_fold_split_datetime_boundaries do? : time series validation fold split datetime boundaries config.toml: Provide date or datetime timestamps (in same format as the time column) for custom training and validation splits like this: \"tr_start1, tr_end1, va_start1, va_end1, ..., tr_startN, tr_endN, va_startN, va_endN\"" + }, + { + "prompt_type": "plain", + "instruction": ": Explain time_series_validation_fold_split_datetime_boundaries. : time series validation fold split datetime boundaries config.toml: Provide date or datetime timestamps (in same format as the time column) for custom training and validation splits like this: \"tr_start1, tr_end1, va_start1, va_end1, ..., tr_startN, tr_endN, va_startN, va_endN\"" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Custom validation splits for time-series experiments: . : Set the time series validation fold split datetime boundaries config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_validation_fold_split_datetime_boundaries", + "output": "time series validation fold split datetime boundaries config.toml: Provide date or datetime timestamps (in same format as the time column) for custom training and validation splits like this: \"tr_start1, tr_end1, va_start1, va_end1, ..., tr_startN, tr_endN, va_startN, va_endN\"" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_validation_fold_split_datetime_boundaries", + "output": "time series validation fold split datetime boundaries config.toml: Custom validation splits for time-series experiments: Provide date or datetime timestamps (in same format as the time column) for custom training and validation splits like this: \"tr_start1, tr_end1, va_start1, va_end1, ..., tr_startN, tr_endN, 
va_startN, va_endN\"" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time series validation fold split datetime boundaries", + "output": "time series validation fold split datetime boundaries config.toml: Custom validation splits for time-series experiments: Provide date or datetime timestamps (in same format as the time column) for custom training and validation splits like this: \"tr_start1, tr_end1, va_start1, va_end1, ..., tr_startN, tr_endN, va_startN, va_endN\"" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Custom validation splits for time-series experiments: ", + "output": "time series validation fold split datetime boundaries config.toml: Custom validation splits for time-series experiments: Provide date or datetime timestamps (in same format as the time column) for custom training and validation splits like this: \"tr_start1, tr_end1, va_start1, va_end1, ..., tr_startN, tr_endN, va_startN, va_endN\"" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting time_series_validation_fold_split_datetime_boundaries", + "output": "time series validation fold split datetime boundaries config.toml: Provide date or datetime timestamps (in same format as the time column) for custom training and validation splits like this: \"tr_start1, tr_end1, va_start1, va_end1, ..., tr_startN, tr_endN, va_startN, va_endN\"" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting time_series_validation_fold_split_datetime_boundaries", + "output": "time series validation fold split datetime boundaries config.toml: Custom validation splits for time-series experiments: Provide date or datetime timestamps (in same format as the time column) for custom training and validation splits like this: \"tr_start1, tr_end1, va_start1, 
va_end1, ..., tr_startN, tr_endN, va_startN, va_endN\"" + }, + { + "prompt_type": "plain", + "instruction": ": What does time_series_validation_splits do? : time series validation splits config.toml: Set fixed number of time-based splits for internal model validation (actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "plain", + "instruction": ": Explain time_series_validation_splits. : time series validation splits config.toml: Set fixed number of time-based splits for internal model validation (actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of time-based splits for internal model validation (-1 = auto): . : Set the time series validation splits config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_validation_splits", + "output": "time series validation splits config.toml: Set fixed number of time-based splits for internal model validation (actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_validation_splits", + "output": "time series validation splits config.toml: Number of time-based splits for internal model validation (-1 = auto): Set fixed number of time-based splits for internal model validation (actual number of splits allowed can be less and is determined at experiment run-time)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time series validation splits", + "output": "time series validation splits config.toml: Number of time-based splits for internal model validation (-1 = auto): Set fixed number of time-based splits for internal model validation (actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of time-based splits for internal model validation (-1 = auto): ", + "output": "time series validation splits config.toml: Number of time-based splits for internal model validation (-1 = auto): Set fixed number of time-based splits for internal model validation (actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting time_series_validation_splits", + "output": "time series validation splits config.toml: Set fixed number of time-based splits for internal model validation (actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting time_series_validation_splits", + "output": "time series validation splits config.toml: Number of time-based splits for internal model validation (-1 = auto): Set fixed number of time-based splits for internal model validation (actual number of splits allowed can be less and is determined at experiment run-time)." + }, + { + "prompt_type": "plain", + "instruction": ": What does time_series_splits_max_overlap do? : time series splits max overlap config.toml: Maximum overlap between two time-based splits. Higher values increase the amount of possible splits." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain time_series_splits_max_overlap. : time series splits max overlap config.toml: Maximum overlap between two time-based splits. Higher values increase the amount of possible splits." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum overlap between two time-based splits.: . : Set the time series splits max overlap config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_splits_max_overlap", + "output": "time series splits max overlap config.toml: Maximum overlap between two time-based splits. Higher values increase the amount of possible splits." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_splits_max_overlap", + "output": "time series splits max overlap config.toml: Maximum overlap between two time-based splits.: Maximum overlap between two time-based splits. Higher values increase the amount of possible splits." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time series splits max overlap", + "output": "time series splits max overlap config.toml: Maximum overlap between two time-based splits.: Maximum overlap between two time-based splits. Higher values increase the amount of possible splits." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum overlap between two time-based splits.: ", + "output": "time series splits max overlap config.toml: Maximum overlap between two time-based splits.: Maximum overlap between two time-based splits. Higher values increase the amount of possible splits." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting time_series_splits_max_overlap", + "output": "time series splits max overlap config.toml: Maximum overlap between two time-based splits. Higher values increase the amount of possible splits." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting time_series_splits_max_overlap", + "output": "time series splits max overlap config.toml: Maximum overlap between two time-based splits.: Maximum overlap between two time-based splits. Higher values increase the amount of possible splits." + }, + { + "prompt_type": "plain", + "instruction": ": What does min_ymd_timestamp do? : min ymd timestamp config.toml: Earliest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 or 201004022312 can be converted to a valid date/datetime, but 1000 or 100004 or 10000402 or 10004022313 can not, and neither can 201000 or 20100500 etc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_ymd_timestamp. : min ymd timestamp config.toml: Earliest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 or 201004022312 can be converted to a valid date/datetime, but 1000 or 100004 or 10000402 or 10004022313 can not, and neither can 201000 or 20100500 etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_ymd_timestamp", + "output": "min ymd timestamp config.toml: Earliest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. 
For example, 2010 or 201004 or 20100402 or 201004022312 can be converted to a valid date/datetime, but 1000 or 100004 or 10000402 or 10004022313 can not, and neither can 201000 or 20100500 etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_ymd_timestamp", + "output": "min ymd timestamp config.toml: Earliest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 or 201004022312 can be converted to a valid date/datetime, but 1000 or 100004 or 10000402 or 10004022313 can not, and neither can 201000 or 20100500 etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min ymd timestamp", + "output": "min ymd timestamp config.toml: Earliest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 or 201004022312 can be converted to a valid date/datetime, but 1000 or 100004 or 10000402 or 10004022313 can not, and neither can 201000 or 20100500 etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "min ymd timestamp config.toml: Earliest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 or 201004022312 can be converted to a valid date/datetime, but 1000 or 100004 or 10000402 or 10004022313 can not, and neither can 201000 or 20100500 etc." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_ymd_timestamp", + "output": "min ymd timestamp config.toml: Earliest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 or 201004022312 can be converted to a valid date/datetime, but 1000 or 100004 or 10000402 or 10004022313 can not, and neither can 201000 or 20100500 etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_ymd_timestamp", + "output": "min ymd timestamp config.toml: Earliest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 or 201004022312 can be converted to a valid date/datetime, but 1000 or 100004 or 10000402 or 10004022313 can not, and neither can 201000 or 20100500 etc." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_ymd_timestamp do? : max ymd timestamp config.toml: Latest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 can be converted to a valid date/datetime, but 3000 or 300004 or 30000402 or 30004022313 can not, and neither can 201000 or 20100500 etc." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_ymd_timestamp. : max ymd timestamp config.toml: Latest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 can be converted to a valid date/datetime, but 3000 or 300004 or 30000402 or 30004022313 can not, and neither can 201000 or 20100500 etc." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_ymd_timestamp", + "output": "max ymd timestamp config.toml: Latest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 can be converted to a valid date/datetime, but 3000 or 300004 or 30000402 or 30004022313 can not, and neither can 201000 or 20100500 etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_ymd_timestamp", + "output": "max ymd timestamp config.toml: Latest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 can be converted to a valid date/datetime, but 3000 or 300004 or 30000402 or 30004022313 can not, and neither can 201000 or 20100500 etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max ymd timestamp", + "output": "max ymd timestamp config.toml: Latest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 can be converted to a valid date/datetime, but 3000 or 300004 or 30000402 or 30004022313 can not, and neither can 201000 or 20100500 etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max ymd timestamp config.toml: Latest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 can be converted to a valid date/datetime, but 3000 or 300004 or 30000402 or 30004022313 can not, and neither can 201000 or 20100500 etc." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_ymd_timestamp", + "output": "max ymd timestamp config.toml: Latest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 can be converted to a valid date/datetime, but 3000 or 300004 or 30000402 or 30004022313 can not, and neither can 201000 or 20100500 etc." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_ymd_timestamp", + "output": "max ymd timestamp config.toml: Latest allowed datetime (in %Y%m%d format) for which to allow automatic conversion of integers to a time column during parsing. For example, 2010 or 201004 or 20100402 can be converted to a valid date/datetime, but 3000 or 300004 or 30000402 or 30004022313 can not, and neither can 201000 or 20100500 etc." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_rows_datetime_format_detection do? : max rows datetime format detection config.toml: maximum number of data samples (randomly selected rows) for date/datetime format detection" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_rows_datetime_format_detection. 
: max rows datetime format detection config.toml: maximum number of data samples (randomly selected rows) for date/datetime format detection" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_datetime_format_detection", + "output": "max rows datetime format detection config.toml: maximum number of data samples (randomly selected rows) for date/datetime format detection" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_rows_datetime_format_detection", + "output": "max rows datetime format detection config.toml: maximum number of data samples (randomly selected rows) for date/datetime format detection" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max rows datetime format detection", + "output": "max rows datetime format detection config.toml: maximum number of data samples (randomly selected rows) for date/datetime format detection" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max rows datetime format detection config.toml: maximum number of data samples (randomly selected rows) for date/datetime format detection" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_rows_datetime_format_detection", + "output": "max rows datetime format detection config.toml: maximum number of data samples (randomly selected rows) for date/datetime format detection" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_rows_datetime_format_detection", + "output": "max rows datetime format detection config.toml: maximum number of data samples (randomly selected rows) for date/datetime format detection" + }, + { + "prompt_type": 
"plain", + "instruction": ": What does disallowed_datetime_formats do? : disallowed datetime formats config.toml: Manually disables certain datetime formats during data ingest and experiments. For example, ['%y'] will avoid parsing columns that contain '00', '01', '02' string values as a date column. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain disallowed_datetime_formats. : disallowed datetime formats config.toml: Manually disables certain datetime formats during data ingest and experiments. For example, ['%y'] will avoid parsing columns that contain '00', '01', '02' string values as a date column. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: List of disallowed datetime formats.: . : Set the disallowed datetime formats config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "disallowed_datetime_formats", + "output": "disallowed datetime formats config.toml: Manually disables certain datetime formats during data ingest and experiments. For example, ['%y'] will avoid parsing columns that contain '00', '01', '02' string values as a date column. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "disallowed_datetime_formats", + "output": "disallowed datetime formats config.toml: List of disallowed datetime formats.: Manually disables certain datetime formats during data ingest and experiments. For example, ['%y'] will avoid parsing columns that contain '00', '01', '02' string values as a date column. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "disallowed datetime formats", + "output": "disallowed datetime formats config.toml: List of disallowed datetime formats.: Manually disables certain datetime formats during data ingest and experiments. 
For example, ['%y'] will avoid parsing columns that contain '00', '01', '02' string values as a date column. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "List of disallowed datetime formats.: ", + "output": "disallowed datetime formats config.toml: List of disallowed datetime formats.: Manually disables certain datetime formats during data ingest and experiments. For example, ['%y'] will avoid parsing columns that contain '00', '01', '02' string values as a date column. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting disallowed_datetime_formats", + "output": "disallowed datetime formats config.toml: Manually disables certain datetime formats during data ingest and experiments. For example, ['%y'] will avoid parsing columns that contain '00', '01', '02' string values as a date column. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting disallowed_datetime_formats", + "output": "disallowed datetime formats config.toml: List of disallowed datetime formats.: Manually disables certain datetime formats during data ingest and experiments. For example, ['%y'] will avoid parsing columns that contain '00', '01', '02' string values as a date column. " + }, + { + "prompt_type": "plain", + "instruction": ": What does use_datetime_cache do? : use datetime cache config.toml: Whether to use datetime cache" + }, + { + "prompt_type": "plain", + "instruction": ": Explain use_datetime_cache. 
: use datetime cache config.toml: Whether to use datetime cache" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_datetime_cache", + "output": "use datetime cache config.toml: Whether to use datetime cache" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use_datetime_cache", + "output": "use datetime cache config.toml: Whether to use datetime cache" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "use datetime cache", + "output": "use datetime cache config.toml: Whether to use datetime cache" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "use datetime cache config.toml: Whether to use datetime cache" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting use_datetime_cache", + "output": "use datetime cache config.toml: Whether to use datetime cache" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting use_datetime_cache", + "output": "use datetime cache config.toml: Whether to use datetime cache" + }, + { + "prompt_type": "plain", + "instruction": ": What does datetime_cache_min_rows do? : datetime cache min rows config.toml: Minimum amount of rows required to utilize datetime cache" + }, + { + "prompt_type": "plain", + "instruction": ": Explain datetime_cache_min_rows. 
: datetime cache min rows config.toml: Minimum amount of rows required to utilize datetime cache" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datetime_cache_min_rows", + "output": "datetime cache min rows config.toml: Minimum amount of rows required to utilize datetime cache" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datetime_cache_min_rows", + "output": "datetime cache min rows config.toml: Minimum amount of rows required to utilize datetime cache" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datetime cache min rows", + "output": "datetime cache min rows config.toml: Minimum amount of rows required to utilize datetime cache" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "datetime cache min rows config.toml: Minimum amount of rows required to utilize datetime cache" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting datetime_cache_min_rows", + "output": "datetime cache min rows config.toml: Minimum amount of rows required to utilize datetime cache" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting datetime_cache_min_rows", + "output": "datetime cache min rows config.toml: Minimum amount of rows required to utilize datetime cache" + }, + { + "prompt_type": "plain", + "instruction": ": What does holiday_features do? : holiday features config.toml: Automatically generate is-holiday features from date columns" + }, + { + "prompt_type": "plain", + "instruction": ": Explain holiday_features. 
: holiday features config.toml: Automatically generate is-holiday features from date columns" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Generate holiday features: . : Set the holiday features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "holiday_features", + "output": "holiday features config.toml: Automatically generate is-holiday features from date columns" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "holiday_features", + "output": "holiday features config.toml: Generate holiday features: Automatically generate is-holiday features from date columns" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "holiday features", + "output": "holiday features config.toml: Generate holiday features: Automatically generate is-holiday features from date columns" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Generate holiday features: ", + "output": "holiday features config.toml: Generate holiday features: Automatically generate is-holiday features from date columns" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting holiday_features", + "output": "holiday features config.toml: Automatically generate is-holiday features from date columns" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting holiday_features", + "output": "holiday features config.toml: Generate holiday features: Automatically generate is-holiday features from date columns" + }, + { + "prompt_type": "plain", + "instruction": ": What does holiday_countries do? 
: holiday countries config.toml: List of countries for which to look up holiday calendar and to generate is-Holiday features for" + }, + { + "prompt_type": "plain", + "instruction": ": Explain holiday_countries. : holiday countries config.toml: List of countries for which to look up holiday calendar and to generate is-Holiday features for" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Country code(s) for holiday features: . : Set the holiday countries config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "holiday_countries", + "output": "holiday countries config.toml: List of countries for which to look up holiday calendar and to generate is-Holiday features for" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "holiday_countries", + "output": "holiday countries config.toml: Country code(s) for holiday features: List of countries for which to look up holiday calendar and to generate is-Holiday features for" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "holiday countries", + "output": "holiday countries config.toml: Country code(s) for holiday features: List of countries for which to look up holiday calendar and to generate is-Holiday features for" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Country code(s) for holiday features: ", + "output": "holiday countries config.toml: Country code(s) for holiday features: List of countries for which to look up holiday calendar and to generate is-Holiday features for" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting holiday_countries", + "output": "holiday countries config.toml: List of countries for which to look up holiday 
calendar and to generate is-Holiday features for" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting holiday_countries", + "output": "holiday countries config.toml: Country code(s) for holiday features: List of countries for which to look up holiday calendar and to generate is-Holiday features for" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_time_series_properties_sample_size do? : max time series properties sample size config.toml: Max. sample size for automatic determination of time series train/valid split properties, only if time column is selected" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_time_series_properties_sample_size. : max time series properties sample size config.toml: Max. sample size for automatic determination of time series train/valid split properties, only if time column is selected" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_time_series_properties_sample_size", + "output": "max time series properties sample size config.toml: Max. sample size for automatic determination of time series train/valid split properties, only if time column is selected" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_time_series_properties_sample_size", + "output": "max time series properties sample size config.toml: Max. sample size for automatic determination of time series train/valid split properties, only if time column is selected" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max time series properties sample size", + "output": "max time series properties sample size config.toml: Max. 
sample size for automatic determination of time series train/valid split properties, only if time column is selected" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max time series properties sample size config.toml: Max. sample size for automatic determination of time series train/valid split properties, only if time column is selected" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_time_series_properties_sample_size", + "output": "max time series properties sample size config.toml: Max. sample size for automatic determination of time series train/valid split properties, only if time column is selected" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_time_series_properties_sample_size", + "output": "max time series properties sample size config.toml: Max. sample size for automatic determination of time series train/valid split properties, only if time column is selected" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_lag_sizes do? : max lag sizes config.toml: Maximum number of lag sizes to use for lags-based time-series experiments. are sampled from if sample_lag_sizes==True, else all are taken (-1 == automatic)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_lag_sizes. : max lag sizes config.toml: Maximum number of lag sizes to use for lags-based time-series experiments. are sampled from if sample_lag_sizes==True, else all are taken (-1 == automatic)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_lag_sizes", + "output": "max lag sizes config.toml: Maximum number of lag sizes to use for lags-based time-series experiments. 
are sampled from if sample_lag_sizes==True, else all are taken (-1 == automatic)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_lag_sizes", + "output": "max lag sizes config.toml: Maximum number of lag sizes to use for lags-based time-series experiments. are sampled from if sample_lag_sizes==True, else all are taken (-1 == automatic)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max lag sizes", + "output": "max lag sizes config.toml: Maximum number of lag sizes to use for lags-based time-series experiments. are sampled from if sample_lag_sizes==True, else all are taken (-1 == automatic)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max lag sizes config.toml: Maximum number of lag sizes to use for lags-based time-series experiments. are sampled from if sample_lag_sizes==True, else all are taken (-1 == automatic)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_lag_sizes", + "output": "max lag sizes config.toml: Maximum number of lag sizes to use for lags-based time-series experiments. are sampled from if sample_lag_sizes==True, else all are taken (-1 == automatic)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_lag_sizes", + "output": "max lag sizes config.toml: Maximum number of lag sizes to use for lags-based time-series experiments. are sampled from if sample_lag_sizes==True, else all are taken (-1 == automatic)" + }, + { + "prompt_type": "plain", + "instruction": ": What does min_lag_autocorrelation do? 
: min lag autocorrelation config.toml: Minimum required autocorrelation threshold for a lag to be considered for feature engineering" + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_lag_autocorrelation. : min lag autocorrelation config.toml: Minimum required autocorrelation threshold for a lag to be considered for feature engineering" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_lag_autocorrelation", + "output": "min lag autocorrelation config.toml: Minimum required autocorrelation threshold for a lag to be considered for feature engineering" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_lag_autocorrelation", + "output": "min lag autocorrelation config.toml: Minimum required autocorrelation threshold for a lag to be considered for feature engineering" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min lag autocorrelation", + "output": "min lag autocorrelation config.toml: Minimum required autocorrelation threshold for a lag to be considered for feature engineering" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "min lag autocorrelation config.toml: Minimum required autocorrelation threshold for a lag to be considered for feature engineering" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_lag_autocorrelation", + "output": "min lag autocorrelation config.toml: Minimum required autocorrelation threshold for a lag to be considered for feature engineering" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_lag_autocorrelation", + "output": "min lag autocorrelation config.toml: Minimum 
required autocorrelation threshold for a lag to be considered for feature engineering" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_signal_lag_sizes do? : max signal lag sizes config.toml: How many samples of lag sizes to use for a single time group (single time series signal)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_signal_lag_sizes. : max signal lag sizes config.toml: How many samples of lag sizes to use for a single time group (single time series signal)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_signal_lag_sizes", + "output": "max signal lag sizes config.toml: How many samples of lag sizes to use for a single time group (single time series signal)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_signal_lag_sizes", + "output": "max signal lag sizes config.toml: How many samples of lag sizes to use for a single time group (single time series signal)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max signal lag sizes", + "output": "max signal lag sizes config.toml: How many samples of lag sizes to use for a single time group (single time series signal)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max signal lag sizes config.toml: How many samples of lag sizes to use for a single time group (single time series signal)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_signal_lag_sizes", + "output": "max signal lag sizes config.toml: How many samples of lag sizes to use for a single time group (single time series signal)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the 
expert setting max_signal_lag_sizes", + "output": "max signal lag sizes config.toml: How many samples of lag sizes to use for a single time group (single time series signal)" + }, + { + "prompt_type": "plain", + "instruction": ": What does sample_lag_sizes do? : sample lag sizes config.toml: If enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size, esp. when many unavailable columns for prediction." + }, + { + "prompt_type": "plain", + "instruction": ": Explain sample_lag_sizes. : sample lag sizes config.toml: If enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size, esp. when many unavailable columns for prediction." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to sample lag sizes: . : Set the sample lag sizes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "sample_lag_sizes", + "output": "sample lag sizes config.toml: If enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size, esp. when many unavailable columns for prediction." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "sample_lag_sizes", + "output": "sample lag sizes config.toml: Whether to sample lag sizes: If enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size, esp. when many unavailable columns for prediction." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "sample lag sizes", + "output": "sample lag sizes config.toml: Whether to sample lag sizes: If enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size, esp. when many unavailable columns for prediction." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to sample lag sizes: ", + "output": "sample lag sizes config.toml: Whether to sample lag sizes: If enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size, esp. when many unavailable columns for prediction." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting sample_lag_sizes", + "output": "sample lag sizes config.toml: If enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size, esp. when many unavailable columns for prediction." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting sample_lag_sizes", + "output": "sample lag sizes config.toml: Whether to sample lag sizes: If enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size, esp. when many unavailable columns for prediction." + }, + { + "prompt_type": "plain", + "instruction": ": What does max_sampled_lag_sizes do? 
: max sampled lag sizes config.toml: If sample_lag_sizes is enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size. Defaults to -1 (auto), in which case it's the same as the feature interaction depth controlled by max_feature_interaction_depth." + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_sampled_lag_sizes. : max sampled lag sizes config.toml: If sample_lag_sizes is enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size. Defaults to -1 (auto), in which case it's the same as the feature interaction depth controlled by max_feature_interaction_depth." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Number of sampled lag sizes. -1 for auto.: . : Set the max sampled lag sizes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_sampled_lag_sizes", + "output": "max sampled lag sizes config.toml: If sample_lag_sizes is enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size. Defaults to -1 (auto), in which case it's the same as the feature interaction depth controlled by max_feature_interaction_depth." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_sampled_lag_sizes", + "output": "max sampled lag sizes config.toml: Number of sampled lag sizes. -1 for auto.: If sample_lag_sizes is enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. 
Can help reduce overall model complexity and size. Defaults to -1 (auto), in which case it's the same as the feature interaction depth controlled by max_feature_interaction_depth." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max sampled lag sizes", + "output": "max sampled lag sizes config.toml: Number of sampled lag sizes. -1 for auto.: If sample_lag_sizes is enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size. Defaults to -1 (auto), in which case it's the same as the feature interaction depth controlled by max_feature_interaction_depth." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Number of sampled lag sizes. -1 for auto.: ", + "output": "max sampled lag sizes config.toml: Number of sampled lag sizes. -1 for auto.: If sample_lag_sizes is enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size. Defaults to -1 (auto), in which case it's the same as the feature interaction depth controlled by max_feature_interaction_depth." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_sampled_lag_sizes", + "output": "max sampled lag sizes config.toml: If sample_lag_sizes is enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size. Defaults to -1 (auto), in which case it's the same as the feature interaction depth controlled by max_feature_interaction_depth." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_sampled_lag_sizes", + "output": "max sampled lag sizes config.toml: Number of sampled lag sizes. -1 for auto.: If sample_lag_sizes is enabled, sample from a set of possible lag sizes (e.g., lags=[1, 4, 8]) for each lag-based transformer, to no more than max_sampled_lag_sizes lags. Can help reduce overall model complexity and size. Defaults to -1 (auto), in which case it's the same as the feature interaction depth controlled by max_feature_interaction_depth." + }, + { + "prompt_type": "plain", + "instruction": ": What does override_lag_sizes do? : override lag sizes config.toml: Override lags to be usede.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "plain", + "instruction": ": Explain override_lag_sizes. : override lag sizes config.toml: Override lags to be usede.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Time-series lags override, e.g. [7, 14, 21]: . : Set the override lag sizes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override_lag_sizes", + "output": "override lag sizes config.toml: Override lags to be usede.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 
5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override_lag_sizes", + "output": "override lag sizes config.toml: Time-series lags override, e.g. [7, 14, 21]: Override lags to be usede.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override lag sizes", + "output": "override lag sizes config.toml: Time-series lags override, e.g. [7, 14, 21]: Override lags to be usede.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Time-series lags override, e.g. [7, 14, 21]: ", + "output": "override lag sizes config.toml: Time-series lags override, e.g. [7, 14, 21]: Override lags to be usede.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting override_lag_sizes", + "output": "override lag sizes config.toml: Override lags to be usede.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting override_lag_sizes", + "output": "override lag sizes config.toml: Time-series lags override, e.g. 
[7, 14, 21]: Override lags to be usede.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "plain", + "instruction": ": What does override_ufapt_lag_sizes do? : override ufapt lag sizes config.toml: Override lags to be used for features that are not known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "plain", + "instruction": ": Explain override_ufapt_lag_sizes. : override ufapt lag sizes config.toml: Override lags to be used for features that are not known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Lags override for features that are not known ahead of time: . : Set the override ufapt lag sizes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override_ufapt_lag_sizes", + "output": "override ufapt lag sizes config.toml: Override lags to be used for features that are not known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override_ufapt_lag_sizes", + "output": "override ufapt lag sizes config.toml: Lags override for features that are not known ahead of time: Override lags to be used for features that are not known ahead of timee.g. 
[7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override ufapt lag sizes", + "output": "override ufapt lag sizes config.toml: Lags override for features that are not known ahead of time: Override lags to be used for features that are not known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Lags override for features that are not known ahead of time: ", + "output": "override ufapt lag sizes config.toml: Lags override for features that are not known ahead of time: Override lags to be used for features that are not known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting override_ufapt_lag_sizes", + "output": "override ufapt lag sizes config.toml: Override lags to be used for features that are not known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 
5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting override_ufapt_lag_sizes", + "output": "override ufapt lag sizes config.toml: Lags override for features that are not known ahead of time: Override lags to be used for features that are not known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "plain", + "instruction": ": What does override_non_ufapt_lag_sizes do? : override non ufapt lag sizes config.toml: Override lags to be used for features that are known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "plain", + "instruction": ": Explain override_non_ufapt_lag_sizes. : override non ufapt lag sizes config.toml: Override lags to be used for features that are known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Lags override for features that are known ahead of time: . : Set the override non ufapt lag sizes config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override_non_ufapt_lag_sizes", + "output": "override non ufapt lag sizes config.toml: Override lags to be used for features that are known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 
5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override_non_ufapt_lag_sizes", + "output": "override non ufapt lag sizes config.toml: Lags override for features that are known ahead of time: Override lags to be used for features that are known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "override non ufapt lag sizes", + "output": "override non ufapt lag sizes config.toml: Lags override for features that are known ahead of time: Override lags to be used for features that are known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Lags override for features that are known ahead of time: ", + "output": "override non ufapt lag sizes config.toml: Lags override for features that are known ahead of time: Override lags to be used for features that are known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting override_non_ufapt_lag_sizes", + "output": "override non ufapt lag sizes config.toml: Override lags to be used for features that are known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 
21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting override_non_ufapt_lag_sizes", + "output": "override non ufapt lag sizes config.toml: Lags override for features that are known ahead of time: Override lags to be used for features that are known ahead of timee.g. [7, 14, 21] # this exact liste.g. 21 # produce from 1 to 21e.g. 21:3 produce from 1 to 21 in step of 3e.g. 5-21 produce from 5 to 21e.g. 5-21:3 produce from 5 to 21 in step of 3 " + }, + { + "prompt_type": "plain", + "instruction": ": What does min_lag_size do? : min lag size config.toml: Smallest considered lag size" + }, + { + "prompt_type": "plain", + "instruction": ": Explain min_lag_size. : min lag size config.toml: Smallest considered lag size" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Smallest considered lag size (-1 = auto): . 
: Set the min lag size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_lag_size", + "output": "min lag size config.toml: Smallest considered lag size" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min_lag_size", + "output": "min lag size config.toml: Smallest considered lag size (-1 = auto): Smallest considered lag size" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "min lag size", + "output": "min lag size config.toml: Smallest considered lag size (-1 = auto): Smallest considered lag size" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Smallest considered lag size (-1 = auto): ", + "output": "min lag size config.toml: Smallest considered lag size (-1 = auto): Smallest considered lag size" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting min_lag_size", + "output": "min lag size config.toml: Smallest considered lag size" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting min_lag_size", + "output": "min lag size config.toml: Smallest considered lag size (-1 = auto): Smallest considered lag size" + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_time_column_as_feature do? : allow time column as feature config.toml: Whether to enable feature engineering based on selected time column, e.g. Date~weekday." + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_time_column_as_feature. : allow time column as feature config.toml: Whether to enable feature engineering based on selected time column, e.g. Date~weekday." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable feature engineering from time column: . : Set the allow time column as feature config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_time_column_as_feature", + "output": "allow time column as feature config.toml: Whether to enable feature engineering based on selected time column, e.g. Date~weekday." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_time_column_as_feature", + "output": "allow time column as feature config.toml: Enable feature engineering from time column: Whether to enable feature engineering based on selected time column, e.g. Date~weekday." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow time column as feature", + "output": "allow time column as feature config.toml: Enable feature engineering from time column: Whether to enable feature engineering based on selected time column, e.g. Date~weekday." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable feature engineering from time column: ", + "output": "allow time column as feature config.toml: Enable feature engineering from time column: Whether to enable feature engineering based on selected time column, e.g. Date~weekday." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_time_column_as_feature", + "output": "allow time column as feature config.toml: Whether to enable feature engineering based on selected time column, e.g. Date~weekday." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allow_time_column_as_feature", + "output": "allow time column as feature config.toml: Enable feature engineering from time column: Whether to enable feature engineering based on selected time column, e.g. Date~weekday." + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_time_column_as_numeric_feature do? : allow time column as numeric feature config.toml: Whether to enable integer time column to be used as a numeric feature.If using time series recipe, using time column (numeric time stamps) as input features can lead to model thatmemorizes the actual time stamps instead of features that generalize to the future. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_time_column_as_numeric_feature. : allow time column as numeric feature config.toml: Whether to enable integer time column to be used as a numeric feature.If using time series recipe, using time column (numeric time stamps) as input features can lead to model thatmemorizes the actual time stamps instead of features that generalize to the future. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Allow integer time column as numeric feature: . : Set the allow time column as numeric feature config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_time_column_as_numeric_feature", + "output": "allow time column as numeric feature config.toml: Whether to enable integer time column to be used as a numeric feature.If using time series recipe, using time column (numeric time stamps) as input features can lead to model thatmemorizes the actual time stamps instead of features that generalize to the future. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_time_column_as_numeric_feature", + "output": "allow time column as numeric feature config.toml: Allow integer time column as numeric feature: Whether to enable integer time column to be used as a numeric feature.If using time series recipe, using time column (numeric time stamps) as input features can lead to model thatmemorizes the actual time stamps instead of features that generalize to the future. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow time column as numeric feature", + "output": "allow time column as numeric feature config.toml: Allow integer time column as numeric feature: Whether to enable integer time column to be used as a numeric feature.If using time series recipe, using time column (numeric time stamps) as input features can lead to model thatmemorizes the actual time stamps instead of features that generalize to the future. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Allow integer time column as numeric feature: ", + "output": "allow time column as numeric feature config.toml: Allow integer time column as numeric feature: Whether to enable integer time column to be used as a numeric feature.If using time series recipe, using time column (numeric time stamps) as input features can lead to model thatmemorizes the actual time stamps instead of features that generalize to the future. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_time_column_as_numeric_feature", + "output": "allow time column as numeric feature config.toml: Whether to enable integer time column to be used as a numeric feature.If using time series recipe, using time column (numeric time stamps) as input features can lead to model thatmemorizes the actual time stamps instead of features that generalize to the future. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allow_time_column_as_numeric_feature", + "output": "allow time column as numeric feature config.toml: Allow integer time column as numeric feature: Whether to enable integer time column to be used as a numeric feature.If using time series recipe, using time column (numeric time stamps) as input features can lead to model thatmemorizes the actual time stamps instead of features that generalize to the future. " + }, + { + "prompt_type": "plain", + "instruction": ": What does datetime_funcs do? : datetime funcs config.toml: Allowed date or date-time transformations. Date transformers include: year, quarter, month, week, weekday, day, dayofyear, num. Date transformers also include: hour, minute, second. Features in DAI will show up as get_ + transformation name. E.g. num is a direct numeric value representing the floating point value of time, which can lead to over-fitting if used on IID problems. So this is turned off by default." + }, + { + "prompt_type": "plain", + "instruction": ": Explain datetime_funcs. : datetime funcs config.toml: Allowed date or date-time transformations. Date transformers include: year, quarter, month, week, weekday, day, dayofyear, num. Date transformers also include: hour, minute, second. Features in DAI will show up as get_ + transformation name. E.g. 
num is a direct numeric value representing the floating point value of time, which can lead to over-fitting if used on IID problems. So this is turned off by default." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Allowed date and date-time transformations: . : Set the datetime funcs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datetime_funcs", + "output": "datetime funcs config.toml: Allowed date or date-time transformations. Date transformers include: year, quarter, month, week, weekday, day, dayofyear, num. Date transformers also include: hour, minute, second. Features in DAI will show up as get_ + transformation name. E.g. num is a direct numeric value representing the floating point value of time, which can lead to over-fitting if used on IID problems. So this is turned off by default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datetime_funcs", + "output": "datetime funcs config.toml: Allowed date and date-time transformations: Allowed date or date-time transformations. Date transformers include: year, quarter, month, week, weekday, day, dayofyear, num. Date transformers also include: hour, minute, second. Features in DAI will show up as get_ + transformation name. E.g. num is a direct numeric value representing the floating point value of time, which can lead to over-fitting if used on IID problems. So this is turned off by default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datetime funcs", + "output": "datetime funcs config.toml: Allowed date and date-time transformations: Allowed date or date-time transformations. Date transformers include: year, quarter, month, week, weekday, day, dayofyear, num. Date transformers also include: hour, minute, second. 
Features in DAI will show up as get_ + transformation name. E.g. num is a direct numeric value representing the floating point value of time, which can lead to over-fitting if used on IID problems. So this is turned off by default." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Allowed date and date-time transformations: ", + "output": "datetime funcs config.toml: Allowed date and date-time transformations: Allowed date or date-time transformations. Date transformers include: year, quarter, month, week, weekday, day, dayofyear, num. Date transformers also include: hour, minute, second. Features in DAI will show up as get_ + transformation name. E.g. num is a direct numeric value representing the floating point value of time, which can lead to over-fitting if used on IID problems. So this is turned off by default." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting datetime_funcs", + "output": "datetime funcs config.toml: Allowed date or date-time transformations. Date transformers include: year, quarter, month, week, weekday, day, dayofyear, num. Date transformers also include: hour, minute, second. Features in DAI will show up as get_ + transformation name. E.g. num is a direct numeric value representing the floating point value of time, which can lead to over-fitting if used on IID problems. So this is turned off by default." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting datetime_funcs", + "output": "datetime funcs config.toml: Allowed date and date-time transformations: Allowed date or date-time transformations. Date transformers include: year, quarter, month, week, weekday, day, dayofyear, num. Date transformers also include: hour, minute, second. Features in DAI will show up as get_ + transformation name. E.g. 
num is a direct numeric value representing the floating point value of time, which can lead to over-fitting if used on IID problems. So this is turned off by default." + }, + { + "prompt_type": "plain", + "instruction": ": What does filter_datetime_funcs do? : filter datetime funcs config.toml: Whether to filter out date and date-time transformations that lead to unseen values in the future. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain filter_datetime_funcs. : filter datetime funcs config.toml: Whether to filter out date and date-time transformations that lead to unseen values in the future. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Auto filtering of date and date-time transformations: . : Set the filter datetime funcs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "filter_datetime_funcs", + "output": "filter datetime funcs config.toml: Whether to filter out date and date-time transformations that lead to unseen values in the future. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "filter_datetime_funcs", + "output": "filter datetime funcs config.toml: Auto filtering of date and date-time transformations: Whether to filter out date and date-time transformations that lead to unseen values in the future. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "filter datetime funcs", + "output": "filter datetime funcs config.toml: Auto filtering of date and date-time transformations: Whether to filter out date and date-time transformations that lead to unseen values in the future. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Auto filtering of date and date-time transformations: ", + "output": "filter datetime funcs config.toml: Auto filtering of date and date-time transformations: Whether to filter out date and date-time transformations that lead to unseen values in the future. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting filter_datetime_funcs", + "output": "filter datetime funcs config.toml: Whether to filter out date and date-time transformations that lead to unseen values in the future. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting filter_datetime_funcs", + "output": "filter datetime funcs config.toml: Auto filtering of date and date-time transformations: Whether to filter out date and date-time transformations that lead to unseen values in the future. " + }, + { + "prompt_type": "plain", + "instruction": ": What does allow_tgc_as_features do? : allow tgc as features config.toml: Whether to consider time groups columns (tgc) as standalone features. Note that 'time_column' is treated separately via 'Allow to engineer features from time column'. Note that tgc_allow_target_encoding independently controls if time column groups are target encoded. Use allowed_coltypes_for_tgc_as_features for control per feature type. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain allow_tgc_as_features. : allow tgc as features config.toml: Whether to consider time groups columns (tgc) as standalone features. Note that 'time_column' is treated separately via 'Allow to engineer features from time column'. Note that tgc_allow_target_encoding independently controls if time column groups are target encoded. Use allowed_coltypes_for_tgc_as_features for control per feature type. 
" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Consider time groups columns as standalone features: . : Set the allow tgc as features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_tgc_as_features", + "output": "allow tgc as features config.toml: Whether to consider time groups columns (tgc) as standalone features. Note that 'time_column' is treated separately via 'Allow to engineer features from time column'. Note that tgc_allow_target_encoding independently controls if time column groups are target encoded. Use allowed_coltypes_for_tgc_as_features for control per feature type. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow_tgc_as_features", + "output": "allow tgc as features config.toml: Consider time groups columns as standalone features: Whether to consider time groups columns (tgc) as standalone features. Note that 'time_column' is treated separately via 'Allow to engineer features from time column'. Note that tgc_allow_target_encoding independently controls if time column groups are target encoded. Use allowed_coltypes_for_tgc_as_features for control per feature type. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allow tgc as features", + "output": "allow tgc as features config.toml: Consider time groups columns as standalone features: Whether to consider time groups columns (tgc) as standalone features. Note that 'time_column' is treated separately via 'Allow to engineer features from time column'. Note that tgc_allow_target_encoding independently controls if time column groups are target encoded. Use allowed_coltypes_for_tgc_as_features for control per feature type. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Consider time groups columns as standalone features: ", + "output": "allow tgc as features config.toml: Consider time groups columns as standalone features: Whether to consider time groups columns (tgc) as standalone features. Note that 'time_column' is treated separately via 'Allow to engineer features from time column'. Note that tgc_allow_target_encoding independently controls if time column groups are target encoded. Use allowed_coltypes_for_tgc_as_features for control per feature type. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allow_tgc_as_features", + "output": "allow tgc as features config.toml: Whether to consider time groups columns (tgc) as standalone features. Note that 'time_column' is treated separately via 'Allow to engineer features from time column'. Note that tgc_allow_target_encoding independently controls if time column groups are target encoded. Use allowed_coltypes_for_tgc_as_features for control per feature type. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allow_tgc_as_features", + "output": "allow tgc as features config.toml: Consider time groups columns as standalone features: Whether to consider time groups columns (tgc) as standalone features. Note that 'time_column' is treated separately via 'Allow to engineer features from time column'. Note that tgc_allow_target_encoding independently controls if time column groups are target encoded. Use allowed_coltypes_for_tgc_as_features for control per feature type. " + }, + { + "prompt_type": "plain", + "instruction": ": What does allowed_coltypes_for_tgc_as_features do? 
: allowed coltypes for tgc as features config.toml: Which time groups columns (tgc) feature types to consider as standalone features,if the corresponding flag \"Consider time groups columns as standalone features\" is set to true.E.g. all column types would be [\"numeric\", \"categorical\", \"ohe_categorical\", \"datetime\", \"date\", \"text\"]Note that 'time_column' is treated separately via 'Allow to engineer features from time column'.Note that if lag-based time series recipe is disabled, then all tgc are allowed features. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain allowed_coltypes_for_tgc_as_features. : allowed coltypes for tgc as features config.toml: Which time groups columns (tgc) feature types to consider as standalone features,if the corresponding flag \"Consider time groups columns as standalone features\" is set to true.E.g. all column types would be [\"numeric\", \"categorical\", \"ohe_categorical\", \"datetime\", \"date\", \"text\"]Note that 'time_column' is treated separately via 'Allow to engineer features from time column'.Note that if lag-based time series recipe is disabled, then all tgc are allowed features. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Which tgc feature types to consider as standalone features: . : Set the allowed coltypes for tgc as features config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allowed_coltypes_for_tgc_as_features", + "output": "allowed coltypes for tgc as features config.toml: Which time groups columns (tgc) feature types to consider as standalone features,if the corresponding flag \"Consider time groups columns as standalone features\" is set to true.E.g. 
all column types would be [\"numeric\", \"categorical\", \"ohe_categorical\", \"datetime\", \"date\", \"text\"]Note that 'time_column' is treated separately via 'Allow to engineer features from time column'.Note that if lag-based time series recipe is disabled, then all tgc are allowed features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allowed_coltypes_for_tgc_as_features", + "output": "allowed coltypes for tgc as features config.toml: Which tgc feature types to consider as standalone features: Which time groups columns (tgc) feature types to consider as standalone features,if the corresponding flag \"Consider time groups columns as standalone features\" is set to true.E.g. all column types would be [\"numeric\", \"categorical\", \"ohe_categorical\", \"datetime\", \"date\", \"text\"]Note that 'time_column' is treated separately via 'Allow to engineer features from time column'.Note that if lag-based time series recipe is disabled, then all tgc are allowed features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "allowed coltypes for tgc as features", + "output": "allowed coltypes for tgc as features config.toml: Which tgc feature types to consider as standalone features: Which time groups columns (tgc) feature types to consider as standalone features,if the corresponding flag \"Consider time groups columns as standalone features\" is set to true.E.g. all column types would be [\"numeric\", \"categorical\", \"ohe_categorical\", \"datetime\", \"date\", \"text\"]Note that 'time_column' is treated separately via 'Allow to engineer features from time column'.Note that if lag-based time series recipe is disabled, then all tgc are allowed features. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Which tgc feature types to consider as standalone features: ", + "output": "allowed coltypes for tgc as features config.toml: Which tgc feature types to consider as standalone features: Which time groups columns (tgc) feature types to consider as standalone features,if the corresponding flag \"Consider time groups columns as standalone features\" is set to true.E.g. all column types would be [\"numeric\", \"categorical\", \"ohe_categorical\", \"datetime\", \"date\", \"text\"]Note that 'time_column' is treated separately via 'Allow to engineer features from time column'.Note that if lag-based time series recipe is disabled, then all tgc are allowed features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting allowed_coltypes_for_tgc_as_features", + "output": "allowed coltypes for tgc as features config.toml: Which time groups columns (tgc) feature types to consider as standalone features,if the corresponding flag \"Consider time groups columns as standalone features\" is set to true.E.g. all column types would be [\"numeric\", \"categorical\", \"ohe_categorical\", \"datetime\", \"date\", \"text\"]Note that 'time_column' is treated separately via 'Allow to engineer features from time column'.Note that if lag-based time series recipe is disabled, then all tgc are allowed features. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting allowed_coltypes_for_tgc_as_features", + "output": "allowed coltypes for tgc as features config.toml: Which tgc feature types to consider as standalone features: Which time groups columns (tgc) feature types to consider as standalone features,if the corresponding flag \"Consider time groups columns as standalone features\" is set to true.E.g. 
all column types would be [\"numeric\", \"categorical\", \"ohe_categorical\", \"datetime\", \"date\", \"text\"]Note that 'time_column' is treated separately via 'Allow to engineer features from time column'.Note that if lag-based time series recipe is disabled, then all tgc are allowed features. " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_time_unaware_transformers do? : enable time unaware transformers config.toml: Whether various transformers (clustering, truncated SVD) are enabled,that otherwise would be disabled for time series due topotential to overfit by leaking across time within the fit of each fold. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_time_unaware_transformers. : enable time unaware transformers config.toml: Whether various transformers (clustering, truncated SVD) are enabled,that otherwise would be disabled for time series due topotential to overfit by leaking across time within the fit of each fold. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable time unaware transformers: . : Set the enable time unaware transformers config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_time_unaware_transformers", + "output": "enable time unaware transformers config.toml: Whether various transformers (clustering, truncated SVD) are enabled,that otherwise would be disabled for time series due topotential to overfit by leaking across time within the fit of each fold. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_time_unaware_transformers", + "output": "enable time unaware transformers config.toml: Enable time unaware transformers: Whether various transformers (clustering, truncated SVD) are enabled,that otherwise would be disabled for time series due topotential to overfit by leaking across time within the fit of each fold. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable time unaware transformers", + "output": "enable time unaware transformers config.toml: Enable time unaware transformers: Whether various transformers (clustering, truncated SVD) are enabled,that otherwise would be disabled for time series due topotential to overfit by leaking across time within the fit of each fold. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable time unaware transformers: ", + "output": "enable time unaware transformers config.toml: Enable time unaware transformers: Whether various transformers (clustering, truncated SVD) are enabled,that otherwise would be disabled for time series due topotential to overfit by leaking across time within the fit of each fold. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_time_unaware_transformers", + "output": "enable time unaware transformers config.toml: Whether various transformers (clustering, truncated SVD) are enabled,that otherwise would be disabled for time series due topotential to overfit by leaking across time within the fit of each fold. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_time_unaware_transformers", + "output": "enable time unaware transformers config.toml: Enable time unaware transformers: Whether various transformers (clustering, truncated SVD) are enabled,that otherwise would be disabled for time series due topotential to overfit by leaking across time within the fit of each fold. " + }, + { + "prompt_type": "plain", + "instruction": ": What does tgc_only_use_all_groups do? : tgc only use all groups config.toml: Whether to group by all time groups columns for creating lag features, instead of sampling from them" + }, + { + "prompt_type": "plain", + "instruction": ": Explain tgc_only_use_all_groups. : tgc only use all groups config.toml: Whether to group by all time groups columns for creating lag features, instead of sampling from them" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Always group by all time groups columns for creating lag features: . 
: Set the tgc only use all groups config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc_only_use_all_groups", + "output": "tgc only use all groups config.toml: Whether to group by all time groups columns for creating lag features, instead of sampling from them" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc_only_use_all_groups", + "output": "tgc only use all groups config.toml: Always group by all time groups columns for creating lag features: Whether to group by all time groups columns for creating lag features, instead of sampling from them" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc only use all groups", + "output": "tgc only use all groups config.toml: Always group by all time groups columns for creating lag features: Whether to group by all time groups columns for creating lag features, instead of sampling from them" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Always group by all time groups columns for creating lag features: ", + "output": "tgc only use all groups config.toml: Always group by all time groups columns for creating lag features: Whether to group by all time groups columns for creating lag features, instead of sampling from them" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tgc_only_use_all_groups", + "output": "tgc only use all groups config.toml: Whether to group by all time groups columns for creating lag features, instead of sampling from them" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tgc_only_use_all_groups", + "output": "tgc only use all groups config.toml: Always group by all 
time groups columns for creating lag features: Whether to group by all time groups columns for creating lag features, instead of sampling from them" + }, + { + "prompt_type": "plain", + "instruction": ": What does tgc_allow_target_encoding do? : tgc allow target encoding config.toml: Whether to allow target encoding of time groups. This can be useful if there are many groups. Note that allow_tgc_as_features independently controls if tgc are treated as normal features. 'auto': Choose CV by default. 'CV': Enable out-of-fold and CV-in-CV (if enabled) encoding 'simple': Simple memorized targets per group. 'off': Disable. Only relevant for time series experiments that have at least one time column group apart from the time column." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tgc_allow_target_encoding. : tgc allow target encoding config.toml: Whether to allow target encoding of time groups. This can be useful if there are many groups. Note that allow_tgc_as_features independently controls if tgc are treated as normal features. 'auto': Choose CV by default. 'CV': Enable out-of-fold and CV-in-CV (if enabled) encoding 'simple': Simple memorized targets per group. 'off': Disable. Only relevant for time series experiments that have at least one time column group apart from the time column." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Target encoding of time groups: . : Set the tgc allow target encoding config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc_allow_target_encoding", + "output": "tgc allow target encoding config.toml: Whether to allow target encoding of time groups. This can be useful if there are many groups. Note that allow_tgc_as_features independently controls if tgc are treated as normal features. 'auto': Choose CV by default. 
'CV': Enable out-of-fold and CV-in-CV (if enabled) encoding 'simple': Simple memorized targets per group. 'off': Disable. Only relevant for time series experiments that have at least one time column group apart from the time column." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc_allow_target_encoding", + "output": "tgc allow target encoding config.toml: Target encoding of time groups: Whether to allow target encoding of time groups. This can be useful if there are many groups. Note that allow_tgc_as_features independently controls if tgc are treated as normal features. 'auto': Choose CV by default. 'CV': Enable out-of-fold and CV-in-CV (if enabled) encoding 'simple': Simple memorized targets per group. 'off': Disable. Only relevant for time series experiments that have at least one time column group apart from the time column." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc allow target encoding", + "output": "tgc allow target encoding config.toml: Target encoding of time groups: Whether to allow target encoding of time groups. This can be useful if there are many groups. Note that allow_tgc_as_features independently controls if tgc are treated as normal features. 'auto': Choose CV by default. 'CV': Enable out-of-fold and CV-in-CV (if enabled) encoding 'simple': Simple memorized targets per group. 'off': Disable. Only relevant for time series experiments that have at least one time column group apart from the time column." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Target encoding of time groups: ", + "output": "tgc allow target encoding config.toml: Target encoding of time groups: Whether to allow target encoding of time groups. This can be useful if there are many groups. 
Note that allow_tgc_as_features independently controls if tgc are treated as normal features. 'auto': Choose CV by default. 'CV': Enable out-of-fold and CV-in-CV (if enabled) encoding 'simple': Simple memorized targets per group. 'off': Disable. Only relevant for time series experiments that have at least one time column group apart from the time column." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tgc_allow_target_encoding", + "output": "tgc allow target encoding config.toml: Whether to allow target encoding of time groups. This can be useful if there are many groups. Note that allow_tgc_as_features independently controls if tgc are treated as normal features. 'auto': Choose CV by default. 'CV': Enable out-of-fold and CV-in-CV (if enabled) encoding 'simple': Simple memorized targets per group. 'off': Disable. Only relevant for time series experiments that have at least one time column group apart from the time column." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tgc_allow_target_encoding", + "output": "tgc allow target encoding config.toml: Target encoding of time groups: Whether to allow target encoding of time groups. This can be useful if there are many groups. Note that allow_tgc_as_features independently controls if tgc are treated as normal features. 'auto': Choose CV by default. 'CV': Enable out-of-fold and CV-in-CV (if enabled) encoding 'simple': Simple memorized targets per group. 'off': Disable. Only relevant for time series experiments that have at least one time column group apart from the time column." + }, + { + "prompt_type": "plain", + "instruction": ": What does tgc_allow_features_and_target_encoding_auto_tune do? : tgc allow features and target encoding auto tune config.toml: if allow_tgc_as_features is true or tgc_allow_target_encoding is true, whether to try both possibilities to see which does better during tuning. 
Safer than forcing one way or the other." + }, + { + "prompt_type": "plain", + "instruction": ": Explain tgc_allow_features_and_target_encoding_auto_tune. : tgc allow features and target encoding auto tune config.toml: if allow_tgc_as_features is true or tgc_allow_target_encoding is true, whether to try both possibilities to see which does better during tuning. Safer than forcing one way or the other." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Auto-Tune time column groups as features and target encoding: . : Set the tgc allow features and target encoding auto tune config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc_allow_features_and_target_encoding_auto_tune", + "output": "tgc allow features and target encoding auto tune config.toml: if allow_tgc_as_features is true or tgc_allow_target_encoding is true, whether to try both possibilities to see which does better during tuning. Safer than forcing one way or the other." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc_allow_features_and_target_encoding_auto_tune", + "output": "tgc allow features and target encoding auto tune config.toml: Auto-Tune time column groups as features and target encoding: if allow_tgc_as_features is true or tgc_allow_target_encoding is true, whether to try both possibilities to see which does better during tuning. Safer than forcing one way or the other." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc allow features and target encoding auto tune", + "output": "tgc allow features and target encoding auto tune config.toml: Auto-Tune time column groups as features and target encoding: if allow_tgc_as_features is true or tgc_allow_target_encoding is true, whether to try both possibilities to see which does better during tuning. Safer than forcing one way or the other." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Auto-Tune time column groups as features and target encoding: ", + "output": "tgc allow features and target encoding auto tune config.toml: Auto-Tune time column groups as features and target encoding: if allow_tgc_as_features is true or tgc_allow_target_encoding is true, whether to try both possibilities to see which does better during tuning. Safer than forcing one way or the other." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tgc_allow_features_and_target_encoding_auto_tune", + "output": "tgc allow features and target encoding auto tune config.toml: if allow_tgc_as_features is true or tgc_allow_target_encoding is true, whether to try both possibilities to see which does better during tuning. Safer than forcing one way or the other." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tgc_allow_features_and_target_encoding_auto_tune", + "output": "tgc allow features and target encoding auto tune config.toml: Auto-Tune time column groups as features and target encoding: if allow_tgc_as_features is true or tgc_allow_target_encoding is true, whether to try both possibilities to see which does better during tuning. Safer than forcing one way or the other." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does time_series_holdout_preds do? : time series holdout preds config.toml: Enable creation of holdout predictions on training data using moving windows (useful for MLI, but can be slow)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain time_series_holdout_preds. : time series holdout preds config.toml: Enable creation of holdout predictions on training data using moving windows (useful for MLI, but can be slow)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Generate Time-Series Holdout Predictions: . : Set the time series holdout preds config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_holdout_preds", + "output": "time series holdout preds config.toml: Enable creation of holdout predictions on training data using moving windows (useful for MLI, but can be slow)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_holdout_preds", + "output": "time series holdout preds config.toml: Generate Time-Series Holdout Predictions: Enable creation of holdout predictions on training data using moving windows (useful for MLI, but can be slow)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time series holdout preds", + "output": "time series holdout preds config.toml: Generate Time-Series Holdout Predictions: Enable creation of holdout predictions on training data using moving windows (useful for MLI, but can be slow)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Generate Time-Series Holdout Predictions: ", + "output": "time series holdout preds config.toml: Generate Time-Series Holdout Predictions: Enable creation of holdout predictions 
on training data using moving windows (useful for MLI, but can be slow)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting time_series_holdout_preds", + "output": "time series holdout preds config.toml: Enable creation of holdout predictions on training data using moving windows (useful for MLI, but can be slow)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting time_series_holdout_preds", + "output": "time series holdout preds config.toml: Generate Time-Series Holdout Predictions: Enable creation of holdout predictions on training data using moving windows (useful for MLI, but can be slow)" + }, + { + "prompt_type": "plain", + "instruction": ": What does time_series_max_holdout_splits do? : time series max holdout splits config.toml: Max number of splits used for creating final time-series model's holdout/backtesting predictions. With the default value '-1' the same amount of splits as during model validation will be used. Use 'time_series_validation_splits' to control amount of time-based splits used for model validation." + }, + { + "prompt_type": "plain", + "instruction": ": Explain time_series_max_holdout_splits. : time series max holdout splits config.toml: Max number of splits used for creating final time-series model's holdout/backtesting predictions. With the default value '-1' the same amount of splits as during model validation will be used. Use 'time_series_validation_splits' to control amount of time-based splits used for model validation." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Maximum number of splits used for creating final time-series model's holdout predictions: . 
: Set the time series max holdout splits config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_max_holdout_splits", + "output": "time series max holdout splits config.toml: Max number of splits used for creating final time-series model's holdout/backtesting predictions. With the default value '-1' the same amount of splits as during model validation will be used. Use 'time_series_validation_splits' to control amount of time-based splits used for model validation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_max_holdout_splits", + "output": "time series max holdout splits config.toml: Maximum number of splits used for creating final time-series model's holdout predictions: Max number of splits used for creating final time-series model's holdout/backtesting predictions. With the default value '-1' the same amount of splits as during model validation will be used. Use 'time_series_validation_splits' to control amount of time-based splits used for model validation." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time series max holdout splits", + "output": "time series max holdout splits config.toml: Maximum number of splits used for creating final time-series model's holdout predictions: Max number of splits used for creating final time-series model's holdout/backtesting predictions. With the default value '-1' the same amount of splits as during model validation will be used. Use 'time_series_validation_splits' to control amount of time-based splits used for model validation." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Maximum number of splits used for creating final time-series model's holdout predictions: ", + "output": "time series max holdout splits config.toml: Maximum number of splits used for creating final time-series model's holdout predictions: Max number of splits used for creating final time-series model's holdout/backtesting predictions. With the default value '-1' the same amount of splits as during model validation will be used. Use 'time_series_validation_splits' to control amount of time-based splits used for model validation." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting time_series_max_holdout_splits", + "output": "time series max holdout splits config.toml: Max number of splits used for creating final time-series model's holdout/backtesting predictions. With the default value '-1' the same amount of splits as during model validation will be used. Use 'time_series_validation_splits' to control amount of time-based splits used for model validation." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting time_series_max_holdout_splits", + "output": "time series max holdout splits config.toml: Maximum number of splits used for creating final time-series model's holdout predictions: Max number of splits used for creating final time-series model's holdout/backtesting predictions. With the default value '-1' the same amount of splits as during model validation will be used. Use 'time_series_validation_splits' to control amount of time-based splits used for model validation." + }, + { + "prompt_type": "plain", + "instruction": ": What does blend_in_link_space do? : blend in link space config.toml: Whether to blend ensembles in link space, so that can apply inverse link function to get predictions after blending. 
This allows to get Shapley values to sum up to final predictions, after applying inverse link function: preds = inverse_link( (blend(base learner predictions in link space ))) = inverse_link(sum(blend(base learner shapley values in link space))) = inverse_link(sum( ensemble shapley values in link space ))For binary classification, this is only supported if inverse_link = logistic = 1/(1+exp(-x))For multiclass classification, this is only supported if inverse_link = softmax = exp(x)/sum(exp(x))For regression, this behavior happens naturally if all base learners use the identity link function, otherwise not possible" + }, + { + "prompt_type": "plain", + "instruction": ": Explain blend_in_link_space. : blend in link space config.toml: Whether to blend ensembles in link space, so that can apply inverse link function to get predictions after blending. This allows to get Shapley values to sum up to final predictions, after applying inverse link function: preds = inverse_link( (blend(base learner predictions in link space ))) = inverse_link(sum(blend(base learner shapley values in link space))) = inverse_link(sum( ensemble shapley values in link space ))For binary classification, this is only supported if inverse_link = logistic = 1/(1+exp(-x))For multiclass classification, this is only supported if inverse_link = softmax = exp(x)/sum(exp(x))For regression, this behavior happens naturally if all base learners use the identity link function, otherwise not possible" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to blend ensembles in link space (applies to classification only): . : Set the blend in link space config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "blend_in_link_space", + "output": "blend in link space config.toml: Whether to blend ensembles in link space, so that can apply inverse link function to get predictions after blending. 
This allows to get Shapley values to sum up to final predictions, after applying inverse link function: preds = inverse_link( (blend(base learner predictions in link space ))) = inverse_link(sum(blend(base learner shapley values in link space))) = inverse_link(sum( ensemble shapley values in link space ))For binary classification, this is only supported if inverse_link = logistic = 1/(1+exp(-x))For multiclass classification, this is only supported if inverse_link = softmax = exp(x)/sum(exp(x))For regression, this behavior happens naturally if all base learners use the identity link function, otherwise not possible" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "blend_in_link_space", + "output": "blend in link space config.toml: Whether to blend ensembles in link space (applies to classification only): Whether to blend ensembles in link space, so that can apply inverse link function to get predictions after blending. 
This allows to get Shapley values to sum up to final predictions, after applying inverse link function: preds = inverse_link( (blend(base learner predictions in link space ))) = inverse_link(sum(blend(base learner shapley values in link space))) = inverse_link(sum( ensemble shapley values in link space ))For binary classification, this is only supported if inverse_link = logistic = 1/(1+exp(-x))For multiclass classification, this is only supported if inverse_link = softmax = exp(x)/sum(exp(x))For regression, this behavior happens naturally if all base learners use the identity link function, otherwise not possible" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "blend in link space", + "output": "blend in link space config.toml: Whether to blend ensembles in link space (applies to classification only): Whether to blend ensembles in link space, so that can apply inverse link function to get predictions after blending. 
This allows to get Shapley values to sum up to final predictions, after applying inverse link function: preds = inverse_link( (blend(base learner predictions in link space ))) = inverse_link(sum(blend(base learner shapley values in link space))) = inverse_link(sum( ensemble shapley values in link space ))For binary classification, this is only supported if inverse_link = logistic = 1/(1+exp(-x))For multiclass classification, this is only supported if inverse_link = softmax = exp(x)/sum(exp(x))For regression, this behavior happens naturally if all base learners use the identity link function, otherwise not possible" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to blend ensembles in link space (applies to classification only): ", + "output": "blend in link space config.toml: Whether to blend ensembles in link space (applies to classification only): Whether to blend ensembles in link space, so that can apply inverse link function to get predictions after blending. This allows to get Shapley values to sum up to final predictions, after applying inverse link function: preds = inverse_link( (blend(base learner predictions in link space ))) = inverse_link(sum(blend(base learner shapley values in link space))) = inverse_link(sum( ensemble shapley values in link space ))For binary classification, this is only supported if inverse_link = logistic = 1/(1+exp(-x))For multiclass classification, this is only supported if inverse_link = softmax = exp(x)/sum(exp(x))For regression, this behavior happens naturally if all base learners use the identity link function, otherwise not possible" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting blend_in_link_space", + "output": "blend in link space config.toml: Whether to blend ensembles in link space, so that can apply inverse link function to get predictions after blending. 
This allows to get Shapley values to sum up to final predictions, after applying inverse link function: preds = inverse_link( (blend(base learner predictions in link space ))) = inverse_link(sum(blend(base learner shapley values in link space))) = inverse_link(sum( ensemble shapley values in link space ))For binary classification, this is only supported if inverse_link = logistic = 1/(1+exp(-x))For multiclass classification, this is only supported if inverse_link = softmax = exp(x)/sum(exp(x))For regression, this behavior happens naturally if all base learners use the identity link function, otherwise not possible" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting blend_in_link_space", + "output": "blend in link space config.toml: Whether to blend ensembles in link space (applies to classification only): Whether to blend ensembles in link space, so that can apply inverse link function to get predictions after blending. This allows to get Shapley values to sum up to final predictions, after applying inverse link function: preds = inverse_link( (blend(base learner predictions in link space ))) = inverse_link(sum(blend(base learner shapley values in link space))) = inverse_link(sum( ensemble shapley values in link space ))For binary classification, this is only supported if inverse_link = logistic = 1/(1+exp(-x))For multiclass classification, this is only supported if inverse_link = softmax = exp(x)/sum(exp(x))For regression, this behavior happens naturally if all base learners use the identity link function, otherwise not possible" + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_ts_fast_approx do? : mli ts fast approx config.toml: Whether to speed up time-series holdout predictions for back-testing on training data (used for MLI and metrics calculation). Can be slightly less accurate." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_ts_fast_approx. 
: mli ts fast approx config.toml: Whether to speed up time-series holdout predictions for back-testing on training data (used for MLI and metrics calculation). Can be slightly less accurate." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to speed up calculation of Time-Series Holdout Predictions: . : Set the mli ts fast approx config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_ts_fast_approx", + "output": "mli ts fast approx config.toml: Whether to speed up time-series holdout predictions for back-testing on training data (used for MLI and metrics calculation). Can be slightly less accurate." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_ts_fast_approx", + "output": "mli ts fast approx config.toml: Whether to speed up calculation of Time-Series Holdout Predictions: Whether to speed up time-series holdout predictions for back-testing on training data (used for MLI and metrics calculation). Can be slightly less accurate." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli ts fast approx", + "output": "mli ts fast approx config.toml: Whether to speed up calculation of Time-Series Holdout Predictions: Whether to speed up time-series holdout predictions for back-testing on training data (used for MLI and metrics calculation). Can be slightly less accurate." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to speed up calculation of Time-Series Holdout Predictions: ", + "output": "mli ts fast approx config.toml: Whether to speed up calculation of Time-Series Holdout Predictions: Whether to speed up time-series holdout predictions for back-testing on training data (used for MLI and metrics calculation). 
Can be slightly less accurate." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_ts_fast_approx", + "output": "mli ts fast approx config.toml: Whether to speed up time-series holdout predictions for back-testing on training data (used for MLI and metrics calculation). Can be slightly less accurate." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_ts_fast_approx", + "output": "mli ts fast approx config.toml: Whether to speed up calculation of Time-Series Holdout Predictions: Whether to speed up time-series holdout predictions for back-testing on training data (used for MLI and metrics calculation). Can be slightly less accurate." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_ts_fast_approx_contribs do? : mli ts fast approx contribs config.toml: Whether to speed up Shapley values for time-series holdout predictions for back-testing on training data (used for MLI). Can be slightly less accurate." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_ts_fast_approx_contribs. : mli ts fast approx contribs config.toml: Whether to speed up Shapley values for time-series holdout predictions for back-testing on training data (used for MLI). Can be slightly less accurate." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Whether to speed up calculation of Shapley values for Time-Series Holdout Predictions: . : Set the mli ts fast approx contribs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_ts_fast_approx_contribs", + "output": "mli ts fast approx contribs config.toml: Whether to speed up Shapley values for time-series holdout predictions for back-testing on training data (used for MLI). Can be slightly less accurate." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_ts_fast_approx_contribs", + "output": "mli ts fast approx contribs config.toml: Whether to speed up calculation of Shapley values for Time-Series Holdout Predictions: Whether to speed up Shapley values for time-series holdout predictions for back-testing on training data (used for MLI). Can be slightly less accurate." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli ts fast approx contribs", + "output": "mli ts fast approx contribs config.toml: Whether to speed up calculation of Shapley values for Time-Series Holdout Predictions: Whether to speed up Shapley values for time-series holdout predictions for back-testing on training data (used for MLI). Can be slightly less accurate." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Whether to speed up calculation of Shapley values for Time-Series Holdout Predictions: ", + "output": "mli ts fast approx contribs config.toml: Whether to speed up calculation of Shapley values for Time-Series Holdout Predictions: Whether to speed up Shapley values for time-series holdout predictions for back-testing on training data (used for MLI). Can be slightly less accurate." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_ts_fast_approx_contribs", + "output": "mli ts fast approx contribs config.toml: Whether to speed up Shapley values for time-series holdout predictions for back-testing on training data (used for MLI). Can be slightly less accurate." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_ts_fast_approx_contribs", + "output": "mli ts fast approx contribs config.toml: Whether to speed up calculation of Shapley values for Time-Series Holdout Predictions: Whether to speed up Shapley values for time-series holdout predictions for back-testing on training data (used for MLI). Can be slightly less accurate." + }, + { + "prompt_type": "plain", + "instruction": ": What does mli_ts_holdout_contribs do? : mli ts holdout contribs config.toml: Enable creation of Shapley values for holdout predictions on training data using moving windows (useful for MLI, but can be slow), at the time of the experiment. If disabled, MLI will generate Shapley values on demand." + }, + { + "prompt_type": "plain", + "instruction": ": Explain mli_ts_holdout_contribs. : mli ts holdout contribs config.toml: Enable creation of Shapley values for holdout predictions on training data using moving windows (useful for MLI, but can be slow), at the time of the experiment. If disabled, MLI will generate Shapley values on demand." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Generate Shapley values for Time-Series Holdout Predictions at the time of experiment: . : Set the mli ts holdout contribs config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_ts_holdout_contribs", + "output": "mli ts holdout contribs config.toml: Enable creation of Shapley values for holdout predictions on training data using moving windows (useful for MLI, but can be slow), at the time of the experiment. If disabled, MLI will generate Shapley values on demand." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli_ts_holdout_contribs", + "output": "mli ts holdout contribs config.toml: Generate Shapley values for Time-Series Holdout Predictions at the time of experiment: Enable creation of Shapley values for holdout predictions on training data using moving windows (useful for MLI, but can be slow), at the time of the experiment. If disabled, MLI will generate Shapley values on demand." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mli ts holdout contribs", + "output": "mli ts holdout contribs config.toml: Generate Shapley values for Time-Series Holdout Predictions at the time of experiment: Enable creation of Shapley values for holdout predictions on training data using moving windows (useful for MLI, but can be slow), at the time of the experiment. If disabled, MLI will generate Shapley values on demand." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Generate Shapley values for Time-Series Holdout Predictions at the time of experiment: ", + "output": "mli ts holdout contribs config.toml: Generate Shapley values for Time-Series Holdout Predictions at the time of experiment: Enable creation of Shapley values for holdout predictions on training data using moving windows (useful for MLI, but can be slow), at the time of the experiment. If disabled, MLI will generate Shapley values on demand." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mli_ts_holdout_contribs", + "output": "mli ts holdout contribs config.toml: Enable creation of Shapley values for holdout predictions on training data using moving windows (useful for MLI, but can be slow), at the time of the experiment. If disabled, MLI will generate Shapley values on demand." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mli_ts_holdout_contribs", + "output": "mli ts holdout contribs config.toml: Generate Shapley values for Time-Series Holdout Predictions at the time of experiment: Enable creation of Shapley values for holdout predictions on training data using moving windows (useful for MLI, but can be slow), at the time of the experiment. If disabled, MLI will generate Shapley values on demand." + }, + { + "prompt_type": "plain", + "instruction": ": What does time_series_min_interpretability do? : time series min interpretability config.toml: Values of 5 or more can improve generalization by more aggressive dropping of least important features. Set to 1 to disable." + }, + { + "prompt_type": "plain", + "instruction": ": Explain time_series_min_interpretability. : time series min interpretability config.toml: Values of 5 or more can improve generalization by more aggressive dropping of least important features. Set to 1 to disable." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Lower limit on interpretability setting for time-series experiments, implicitly enforced.: . : Set the time series min interpretability config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_min_interpretability", + "output": "time series min interpretability config.toml: Values of 5 or more can improve generalization by more aggressive dropping of least important features. Set to 1 to disable." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time_series_min_interpretability", + "output": "time series min interpretability config.toml: Lower limit on interpretability setting for time-series experiments, implicitly enforced.: Values of 5 or more can improve generalization by more aggressive dropping of least important features. Set to 1 to disable." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "time series min interpretability", + "output": "time series min interpretability config.toml: Lower limit on interpretability setting for time-series experiments, implicitly enforced.: Values of 5 or more can improve generalization by more aggressive dropping of least important features. Set to 1 to disable." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Lower limit on interpretability setting for time-series experiments, implicitly enforced.: ", + "output": "time series min interpretability config.toml: Lower limit on interpretability setting for time-series experiments, implicitly enforced.: Values of 5 or more can improve generalization by more aggressive dropping of least important features. Set to 1 to disable." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting time_series_min_interpretability", + "output": "time series min interpretability config.toml: Values of 5 or more can improve generalization by more aggressive dropping of least important features. Set to 1 to disable." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting time_series_min_interpretability", + "output": "time series min interpretability config.toml: Lower limit on interpretability setting for time-series experiments, implicitly enforced.: Values of 5 or more can improve generalization by more aggressive dropping of least important features. Set to 1 to disable." + }, + { + "prompt_type": "plain", + "instruction": ": What does lags_dropout do? : lags dropout config.toml: Dropout mode for lag features in order to achieve an equal n.a.-ratio between train and validation/test. The independent mode performs a simple feature-wise dropout, whereas the dependent one takes lag-size dependencies per sample/row into account." + }, + { + "prompt_type": "plain", + "instruction": ": Explain lags_dropout. : lags dropout config.toml: Dropout mode for lag features in order to achieve an equal n.a.-ratio between train and validation/test. The independent mode performs a simple feature-wise dropout, whereas the dependent one takes lag-size dependencies per sample/row into account." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Dropout mode for lag features: . : Set the lags dropout config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lags_dropout", + "output": "lags dropout config.toml: Dropout mode for lag features in order to achieve an equal n.a.-ratio between train and validation/test. The independent mode performs a simple feature-wise dropout, whereas the dependent one takes lag-size dependencies per sample/row into account." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lags_dropout", + "output": "lags dropout config.toml: Dropout mode for lag features: Dropout mode for lag features in order to achieve an equal n.a.-ratio between train and validation/test. The independent mode performs a simple feature-wise dropout, whereas the dependent one takes lag-size dependencies per sample/row into account." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "lags dropout", + "output": "lags dropout config.toml: Dropout mode for lag features: Dropout mode for lag features in order to achieve an equal n.a.-ratio between train and validation/test. The independent mode performs a simple feature-wise dropout, whereas the dependent one takes lag-size dependencies per sample/row into account." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Dropout mode for lag features: ", + "output": "lags dropout config.toml: Dropout mode for lag features: Dropout mode for lag features in order to achieve an equal n.a.-ratio between train and validation/test. The independent mode performs a simple feature-wise dropout, whereas the dependent one takes lag-size dependencies per sample/row into account." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting lags_dropout", + "output": "lags dropout config.toml: Dropout mode for lag features in order to achieve an equal n.a.-ratio between train and validation/test. The independent mode performs a simple feature-wise dropout, whereas the dependent one takes lag-size dependencies per sample/row into account." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting lags_dropout", + "output": "lags dropout config.toml: Dropout mode for lag features: Dropout mode for lag features in order to achieve an equal n.a.-ratio between train and validation/test. The independent mode performs a simple feature-wise dropout, whereas the dependent one takes lag-size dependencies per sample/row into account." + }, + { + "prompt_type": "plain", + "instruction": ": What does prob_lag_non_targets do? : prob lag non targets config.toml: Normalized probability of choosing to lag non-targets relative to targets (-1.0 = auto)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain prob_lag_non_targets. : prob lag non targets config.toml: Normalized probability of choosing to lag non-targets relative to targets (-1.0 = auto)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Probability to create non-target lag features (-1.0 = auto): . 
: Set the prob lag non targets config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_lag_non_targets", + "output": "prob lag non targets config.toml: Normalized probability of choosing to lag non-targets relative to targets (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_lag_non_targets", + "output": "prob lag non targets config.toml: Probability to create non-target lag features (-1.0 = auto): Normalized probability of choosing to lag non-targets relative to targets (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob lag non targets", + "output": "prob lag non targets config.toml: Probability to create non-target lag features (-1.0 = auto): Normalized probability of choosing to lag non-targets relative to targets (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Probability to create non-target lag features (-1.0 = auto): ", + "output": "prob lag non targets config.toml: Probability to create non-target lag features (-1.0 = auto): Normalized probability of choosing to lag non-targets relative to targets (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prob_lag_non_targets", + "output": "prob lag non targets config.toml: Normalized probability of choosing to lag non-targets relative to targets (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting prob_lag_non_targets", + "output": "prob lag non targets config.toml: Probability to create non-target lag features (-1.0 = auto): Normalized probability of choosing to lag non-targets relative to targets (-1.0 = 
auto)" + }, + { + "prompt_type": "plain", + "instruction": ": What does rolling_test_method do? : rolling test method config.toml: Method to create rolling test set predictions, if the forecast horizon is shorter than the time span of the test set. One can choose between test time augmentation (TTA) and a successive refitting of the final pipeline." + }, + { + "prompt_type": "plain", + "instruction": ": Explain rolling_test_method. : rolling test method config.toml: Method to create rolling test set predictions, if the forecast horizon is shorter than the time span of the test set. One can choose between test time augmentation (TTA) and a successive refitting of the final pipeline." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Method to create rolling test set predictions: . : Set the rolling test method config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rolling_test_method", + "output": "rolling test method config.toml: Method to create rolling test set predictions, if the forecast horizon is shorter than the time span of the test set. One can choose between test time augmentation (TTA) and a successive refitting of the final pipeline." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rolling_test_method", + "output": "rolling test method config.toml: Method to create rolling test set predictions: Method to create rolling test set predictions, if the forecast horizon is shorter than the time span of the test set. One can choose between test time augmentation (TTA) and a successive refitting of the final pipeline." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rolling test method", + "output": "rolling test method config.toml: Method to create rolling test set predictions: Method to create rolling test set predictions, if the forecast horizon is shorter than the time span of the test set. One can choose between test time augmentation (TTA) and a successive refitting of the final pipeline." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Method to create rolling test set predictions: ", + "output": "rolling test method config.toml: Method to create rolling test set predictions: Method to create rolling test set predictions, if the forecast horizon is shorter than the time span of the test set. One can choose between test time augmentation (TTA) and a successive refitting of the final pipeline." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting rolling_test_method", + "output": "rolling test method config.toml: Method to create rolling test set predictions, if the forecast horizon is shorter than the time span of the test set. One can choose between test time augmentation (TTA) and a successive refitting of the final pipeline." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting rolling_test_method", + "output": "rolling test method config.toml: Method to create rolling test set predictions: Method to create rolling test set predictions, if the forecast horizon is shorter than the time span of the test set. One can choose between test time augmentation (TTA) and a successive refitting of the final pipeline." + }, + { + "prompt_type": "plain", + "instruction": ": What does rolling_test_method_max_splits do? 
: rolling test method max splits config.toml: Max number of splits for 'refit' method to avoid OOM/slowness, both for GA and final refit. In GA, will fall back to fast_tta, in final will fail with error msg.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain rolling_test_method_max_splits. : rolling test method max splits config.toml: Max number of splits for 'refit' method to avoid OOM/slowness, both for GA and final refit. In GA, will fall back to fast_tta, in final will fail with error msg.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rolling_test_method_max_splits", + "output": "rolling test method max splits config.toml: Max number of splits for 'refit' method to avoid OOM/slowness, both for GA and final refit. In GA, will fall back to fast_tta, in final will fail with error msg.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rolling_test_method_max_splits", + "output": "rolling test method max splits config.toml: Max number of splits for 'refit' method to avoid OOM/slowness, both for GA and final refit. In GA, will fall back to fast_tta, in final will fail with error msg.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "rolling test method max splits", + "output": "rolling test method max splits config.toml: Max number of splits for 'refit' method to avoid OOM/slowness, both for GA and final refit. In GA, will fall back to fast_tta, in final will fail with error msg.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Max number of splits for 'refit' method to avoid OOM/slowness, both for GA and final refit. 
In GA, will fall back to fast_tta, in final will fail with error msg.: ", + "output": "rolling test method max splits config.toml: Max number of splits for 'refit' method to avoid OOM/slowness, both for GA and final refit. In GA, will fall back to fast_tta, in final will fail with error msg.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting rolling_test_method_max_splits", + "output": "rolling test method max splits config.toml: Max number of splits for 'refit' method to avoid OOM/slowness, both for GA and final refit. In GA, will fall back to fast_tta, in final will fail with error msg.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting rolling_test_method_max_splits", + "output": "rolling test method max splits config.toml: Max number of splits for 'refit' method to avoid OOM/slowness, both for GA and final refit. In GA, will fall back to fast_tta, in final will fail with error msg.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does fast_tta_internal do? : fast tta internal config.toml: Apply TTA in one pass instead of using rolling windows for internal validation split predictions. Note: Setting this to 'False' leads to significantly longer runtimes." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fast_tta_internal. : fast tta internal config.toml: Apply TTA in one pass instead of using rolling windows for internal validation split predictions. Note: Setting this to 'False' leads to significantly longer runtimes." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Fast TTA for internal validation (feature evolution and holdout predictions): . 
: Set the fast tta internal config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_tta_internal", + "output": "fast tta internal config.toml: Apply TTA in one pass instead of using rolling windows for internal validation split predictions. Note: Setting this to 'False' leads to significantly longer runtimes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_tta_internal", + "output": "fast tta internal config.toml: Fast TTA for internal validation (feature evolution and holdout predictions): Apply TTA in one pass instead of using rolling windows for internal validation split predictions. Note: Setting this to 'False' leads to significantly longer runtimes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast tta internal", + "output": "fast tta internal config.toml: Fast TTA for internal validation (feature evolution and holdout predictions): Apply TTA in one pass instead of using rolling windows for internal validation split predictions. Note: Setting this to 'False' leads to significantly longer runtimes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Fast TTA for internal validation (feature evolution and holdout predictions): ", + "output": "fast tta internal config.toml: Fast TTA for internal validation (feature evolution and holdout predictions): Apply TTA in one pass instead of using rolling windows for internal validation split predictions. Note: Setting this to 'False' leads to significantly longer runtimes." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fast_tta_internal", + "output": "fast tta internal config.toml: Apply TTA in one pass instead of using rolling windows for internal validation split predictions. Note: Setting this to 'False' leads to significantly longer runtimes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fast_tta_internal", + "output": "fast tta internal config.toml: Fast TTA for internal validation (feature evolution and holdout predictions): Apply TTA in one pass instead of using rolling windows for internal validation split predictions. Note: Setting this to 'False' leads to significantly longer runtimes." + }, + { + "prompt_type": "plain", + "instruction": ": What does fast_tta_test do? : fast tta test config.toml: Apply TTA in one pass instead of using rolling windows for test set predictions. This only applies if the forecast horizon is shorter than the time span of the test set. Note: Setting this to 'False' leads to significantly longer runtimes." + }, + { + "prompt_type": "plain", + "instruction": ": Explain fast_tta_test. : fast tta test config.toml: Apply TTA in one pass instead of using rolling windows for test set predictions. This only applies if the forecast horizon is shorter than the time span of the test set. Note: Setting this to 'False' leads to significantly longer runtimes." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Fast TTA for test set predictions: . : Set the fast tta test config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_tta_test", + "output": "fast tta test config.toml: Apply TTA in one pass instead of using rolling windows for test set predictions. This only applies if the forecast horizon is shorter than the time span of the test set. 
Note: Setting this to 'False' leads to significantly longer runtimes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast_tta_test", + "output": "fast tta test config.toml: Fast TTA for test set predictions: Apply TTA in one pass instead of using rolling windows for test set predictions. This only applies if the forecast horizon is shorter than the time span of the test set. Note: Setting this to 'False' leads to significantly longer runtimes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "fast tta test", + "output": "fast tta test config.toml: Fast TTA for test set predictions: Apply TTA in one pass instead of using rolling windows for test set predictions. This only applies if the forecast horizon is shorter than the time span of the test set. Note: Setting this to 'False' leads to significantly longer runtimes." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Fast TTA for test set predictions: ", + "output": "fast tta test config.toml: Fast TTA for test set predictions: Apply TTA in one pass instead of using rolling windows for test set predictions. This only applies if the forecast horizon is shorter than the time span of the test set. Note: Setting this to 'False' leads to significantly longer runtimes." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting fast_tta_test", + "output": "fast tta test config.toml: Apply TTA in one pass instead of using rolling windows for test set predictions. This only applies if the forecast horizon is shorter than the time span of the test set. Note: Setting this to 'False' leads to significantly longer runtimes." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting fast_tta_test", + "output": "fast tta test config.toml: Fast TTA for test set predictions: Apply TTA in one pass instead of using rolling windows for test set predictions. This only applies if the forecast horizon is shorter than the time span of the test set. Note: Setting this to 'False' leads to significantly longer runtimes." + }, + { + "prompt_type": "plain", + "instruction": ": What does prob_default_lags do? : prob default lags config.toml: Probability for new Lags/EWMA gene to use default lags (determined by frequency/gap/horizon, independent of data) (-1.0 = auto)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain prob_default_lags. : prob default lags config.toml: Probability for new Lags/EWMA gene to use default lags (determined by frequency/gap/horizon, independent of data) (-1.0 = auto)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Probability for new time-series transformers to use default lags (-1.0 = auto): . 
: Set the prob default lags config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_default_lags", + "output": "prob default lags config.toml: Probability for new Lags/EWMA gene to use default lags (determined by frequency/gap/horizon, independent of data) (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_default_lags", + "output": "prob default lags config.toml: Probability for new time-series transformers to use default lags (-1.0 = auto): Probability for new Lags/EWMA gene to use default lags (determined by frequency/gap/horizon, independent of data) (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob default lags", + "output": "prob default lags config.toml: Probability for new time-series transformers to use default lags (-1.0 = auto): Probability for new Lags/EWMA gene to use default lags (determined by frequency/gap/horizon, independent of data) (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Probability for new time-series transformers to use default lags (-1.0 = auto): ", + "output": "prob default lags config.toml: Probability for new time-series transformers to use default lags (-1.0 = auto): Probability for new Lags/EWMA gene to use default lags (determined by frequency/gap/horizon, independent of data) (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prob_default_lags", + "output": "prob default lags config.toml: Probability for new Lags/EWMA gene to use default lags (determined by frequency/gap/horizon, independent of data) (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed 
explanation of the expert setting prob_default_lags", + "output": "prob default lags config.toml: Probability for new time-series transformers to use default lags (-1.0 = auto): Probability for new Lags/EWMA gene to use default lags (determined by frequency/gap/horizon, independent of data) (-1.0 = auto)" + }, + { + "prompt_type": "plain", + "instruction": ": What does prob_lagsinteraction do? : prob lagsinteraction config.toml: Unnormalized probability of choosing other lag time-series transformers based on interactions (-1.0 = auto)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain prob_lagsinteraction. : prob lagsinteraction config.toml: Unnormalized probability of choosing other lag time-series transformers based on interactions (-1.0 = auto)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Probability of exploring interaction-based lag transformers (-1.0 = auto): . : Set the prob lagsinteraction config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_lagsinteraction", + "output": "prob lagsinteraction config.toml: Unnormalized probability of choosing other lag time-series transformers based on interactions (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_lagsinteraction", + "output": "prob lagsinteraction config.toml: Probability of exploring interaction-based lag transformers (-1.0 = auto): Unnormalized probability of choosing other lag time-series transformers based on interactions (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob lagsinteraction", + "output": "prob lagsinteraction config.toml: Probability of exploring interaction-based lag transformers (-1.0 = auto): Unnormalized probability of choosing other lag time-series transformers 
based on interactions (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Probability of exploring interaction-based lag transformers (-1.0 = auto): ", + "output": "prob lagsinteraction config.toml: Probability of exploring interaction-based lag transformers (-1.0 = auto): Unnormalized probability of choosing other lag time-series transformers based on interactions (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prob_lagsinteraction", + "output": "prob lagsinteraction config.toml: Unnormalized probability of choosing other lag time-series transformers based on interactions (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting prob_lagsinteraction", + "output": "prob lagsinteraction config.toml: Probability of exploring interaction-based lag transformers (-1.0 = auto): Unnormalized probability of choosing other lag time-series transformers based on interactions (-1.0 = auto)" + }, + { + "prompt_type": "plain", + "instruction": ": What does prob_lagsaggregates do? : prob lagsaggregates config.toml: Unnormalized probability of choosing other lag time-series transformers based on aggregations (-1.0 = auto)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain prob_lagsaggregates. : prob lagsaggregates config.toml: Unnormalized probability of choosing other lag time-series transformers based on aggregations (-1.0 = auto)" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Probability of exploring aggregation-based lag transformers (-1.0 = auto): . 
: Set the prob lagsaggregates config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_lagsaggregates", + "output": "prob lagsaggregates config.toml: Unnormalized probability of choosing other lag time-series transformers based on aggregations (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob_lagsaggregates", + "output": "prob lagsaggregates config.toml: Probability of exploring aggregation-based lag transformers (-1.0 = auto): Unnormalized probability of choosing other lag time-series transformers based on aggregations (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "prob lagsaggregates", + "output": "prob lagsaggregates config.toml: Probability of exploring aggregation-based lag transformers (-1.0 = auto): Unnormalized probability of choosing other lag time-series transformers based on aggregations (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Probability of exploring aggregation-based lag transformers (-1.0 = auto): ", + "output": "prob lagsaggregates config.toml: Probability of exploring aggregation-based lag transformers (-1.0 = auto): Unnormalized probability of choosing other lag time-series transformers based on aggregations (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting prob_lagsaggregates", + "output": "prob lagsaggregates config.toml: Unnormalized probability of choosing other lag time-series transformers based on aggregations (-1.0 = auto)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting prob_lagsaggregates", + "output": "prob lagsaggregates config.toml: 
Probability of exploring aggregation-based lag transformers (-1.0 = auto): Unnormalized probability of choosing other lag time-series transformers based on aggregations (-1.0 = auto)" + }, + { + "prompt_type": "plain", + "instruction": ": What does ts_target_trafo do? : ts target trafo config.toml: Time series centering or detrending transformation. The free parameter(s) of the trend model are fitted and the trend is removed from the target signal, and the pipeline is fitted on the residuals. Predictions are made by adding back the trend. Note: Can be cascaded with 'Time series lag-based target transformation', but is mutually exclusive with regular target transformations. The robust centering or linear detrending variants use RANSAC to achieve a higher tolerance w.r.t. outliers. The Epidemic target transformer uses the SEIR model: https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_SEIR_model" + }, + { + "prompt_type": "plain", + "instruction": ": Explain ts_target_trafo. : ts target trafo config.toml: Time series centering or detrending transformation. The free parameter(s) of the trend model are fitted and the trend is removed from the target signal, and the pipeline is fitted on the residuals. Predictions are made by adding back the trend. Note: Can be cascaded with 'Time series lag-based target transformation', but is mutually exclusive with regular target transformations. The robust centering or linear detrending variants use RANSAC to achieve a higher tolerance w.r.t. outliers. The Epidemic target transformer uses the SEIR model: https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_SEIR_model" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Time series centering or detrending transformation: . 
: Set the ts target trafo config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts_target_trafo", + "output": "ts target trafo config.toml: Time series centering or detrending transformation. The free parameter(s) of the trend model are fitted and the trend is removed from the target signal, and the pipeline is fitted on the residuals. Predictions are made by adding back the trend. Note: Can be cascaded with 'Time series lag-based target transformation', but is mutually exclusive with regular target transformations. The robust centering or linear detrending variants use RANSAC to achieve a higher tolerance w.r.t. outliers. The Epidemic target transformer uses the SEIR model: https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_SEIR_model" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts_target_trafo", + "output": "ts target trafo config.toml: Time series centering or detrending transformation: Time series centering or detrending transformation. The free parameter(s) of the trend model are fitted and the trend is removed from the target signal, and the pipeline is fitted on the residuals. Predictions are made by adding back the trend. Note: Can be cascaded with 'Time series lag-based target transformation', but is mutually exclusive with regular target transformations. The robust centering or linear detrending variants use RANSAC to achieve a higher tolerance w.r.t. outliers. 
The Epidemic target transformer uses the SEIR model: https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_SEIR_model" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts target trafo", + "output": "ts target trafo config.toml: Time series centering or detrending transformation: Time series centering or detrending transformation. The free parameter(s) of the trend model are fitted and the trend is removed from the target signal, and the pipeline is fitted on the residuals. Predictions are made by adding back the trend. Note: Can be cascaded with 'Time series lag-based target transformation', but is mutually exclusive with regular target transformations. The robust centering or linear detrending variants use RANSAC to achieve a higher tolerance w.r.t. outliers. The Epidemic target transformer uses the SEIR model: https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_SEIR_model" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Time series centering or detrending transformation: ", + "output": "ts target trafo config.toml: Time series centering or detrending transformation: Time series centering or detrending transformation. The free parameter(s) of the trend model are fitted and the trend is removed from the target signal, and the pipeline is fitted on the residuals. Predictions are made by adding back the trend. Note: Can be cascaded with 'Time series lag-based target transformation', but is mutually exclusive with regular target transformations. The robust centering or linear detrending variants use RANSAC to achieve a higher tolerance w.r.t. outliers. 
The Epidemic target transformer uses the SEIR model: https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_SEIR_model" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ts_target_trafo", + "output": "ts target trafo config.toml: Time series centering or detrending transformation. The free parameter(s) of the trend model are fitted and the trend is removed from the target signal, and the pipeline is fitted on the residuals. Predictions are made by adding back the trend. Note: Can be cascaded with 'Time series lag-based target transformation', but is mutually exclusive with regular target transformations. The robust centering or linear detrending variants use RANSAC to achieve a higher tolerance w.r.t. outliers. The Epidemic target transformer uses the SEIR model: https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_SEIR_model" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ts_target_trafo", + "output": "ts target trafo config.toml: Time series centering or detrending transformation: Time series centering or detrending transformation. The free parameter(s) of the trend model are fitted and the trend is removed from the target signal, and the pipeline is fitted on the residuals. Predictions are made by adding back the trend. Note: Can be cascaded with 'Time series lag-based target transformation', but is mutually exclusive with regular target transformations. The robust centering or linear detrending variants use RANSAC to achieve a higher tolerance w.r.t. outliers. The Epidemic target transformer uses the SEIR model: https://en.wikipedia.org/wiki/Compartmental_models_in_epidemiology#The_SEIR_model" + }, + { + "prompt_type": "plain", + "instruction": ": What does ts_target_trafo_epidemic_params_dict do? 
: ts target trafo epidemic params dict config.toml: Dictionary to control Epidemic SEIRD model for de-trending of target per time series group.Note: The target column must correspond to I(t), the infected cases as a function of time.For each training split and time series group, the SEIRD model is fitted to the target signal (by optimizingthe free parameters shown below for each time series group).Then, the SEIRD model's value is subtracted from the training response, and the residuals are passed tothe feature engineering and modeling pipeline. For predictions, the SEIRD model's value is added to the residualpredictions from the pipeline, for each time series group.Note: Careful selection of the bounds for the free parameters N, beta, gamma, delta, alpha, rho, lockdown,beta_decay, beta_decay_rate is extremely important for good results.- S(t) : susceptible/healthy/not immune- E(t) : exposed/not yet infectious- I(t) : infectious/active <= target column- R(t) : recovered/immune- D(t) : deceased### Free parameters:- N : total population, N=S+E+I+R+D- beta : rate of exposure (S -> E)- gamma : rate of recovering (I -> R)- delta : incubation period- alpha : fatality rate- rho : rate at which people die- lockdown : day of lockdown (-1 => no lockdown)- beta_decay : beta decay due to lockdown- beta_decay_rate : speed of beta decay### Dynamics:if lockdown >= 0: beta_min = beta * (1 - beta_decay) beta = (beta - beta_min) / (1 + np.exp(-beta_decay_rate * (-t + lockdown))) + beta_mindSdt = -beta * S * I / NdEdt = beta * S * I / N - delta * EdIdt = delta * E - (1 - alpha) * gamma * I - alpha * rho * IdRdt = (1 - alpha) * gamma * IdDdt = alpha * rho * IProvide lower/upper bounds for each parameter you want to control the bounds for. Valid parameters are:N_min, N_max, beta_min, beta_max, gamma_min, gamma_max, delta_min, delta_max, alpha_min, alpha_max,rho_min, rho_max, lockdown_min, lockdown_max, beta_decay_min, beta_decay_max,beta_decay_rate_min, beta_decay_rate_max. 
You can change any subset of parameters, e.g.,ts_target_trafo_epidemic_params_dict=\"{'N_min': 1000, 'beta_max': 0.2}\"To get SEIR model (in cases where death rates are very low, can speed up calculations significantly):set alpha_min=alpha_max=rho_min=rho_max=beta_decay_rate_min=beta_decay_rate_max=0, lockdown_min=lockdown_max=-1. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain ts_target_trafo_epidemic_params_dict. : ts target trafo epidemic params dict config.toml: Dictionary to control Epidemic SEIRD model for de-trending of target per time series group.Note: The target column must correspond to I(t), the infected cases as a function of time.For each training split and time series group, the SEIRD model is fitted to the target signal (by optimizingthe free parameters shown below for each time series group).Then, the SEIRD model's value is subtracted from the training response, and the residuals are passed tothe feature engineering and modeling pipeline. For predictions, the SEIRD model's value is added to the residualpredictions from the pipeline, for each time series group.Note: Careful selection of the bounds for the free parameters N, beta, gamma, delta, alpha, rho, lockdown,beta_decay, beta_decay_rate is extremely important for good results.- S(t) : susceptible/healthy/not immune- E(t) : exposed/not yet infectious- I(t) : infectious/active <= target column- R(t) : recovered/immune- D(t) : deceased### Free parameters:- N : total population, N=S+E+I+R+D- beta : rate of exposure (S -> E)- gamma : rate of recovering (I -> R)- delta : incubation period- alpha : fatality rate- rho : rate at which people die- lockdown : day of lockdown (-1 => no lockdown)- beta_decay : beta decay due to lockdown- beta_decay_rate : speed of beta decay### Dynamics:if lockdown >= 0: beta_min = beta * (1 - beta_decay) beta = (beta - beta_min) / (1 + np.exp(-beta_decay_rate * (-t + lockdown))) + beta_mindSdt = -beta * S * I / NdEdt = beta * S * I / N - delta * EdIdt = 
delta * E - (1 - alpha) * gamma * I - alpha * rho * IdRdt = (1 - alpha) * gamma * IdDdt = alpha * rho * IProvide lower/upper bounds for each parameter you want to control the bounds for. Valid parameters are:N_min, N_max, beta_min, beta_max, gamma_min, gamma_max, delta_min, delta_max, alpha_min, alpha_max,rho_min, rho_max, lockdown_min, lockdown_max, beta_decay_min, beta_decay_max,beta_decay_rate_min, beta_decay_rate_max. You can change any subset of parameters, e.g.,ts_target_trafo_epidemic_params_dict=\"{'N_min': 1000, 'beta_max': 0.2}\"To get SEIR model (in cases where death rates are very low, can speed up calculations significantly):set alpha_min=alpha_max=rho_min=rho_max=beta_decay_rate_min=beta_decay_rate_max=0, lockdown_min=lockdown_max=-1. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Custom bounds for SEIRD epidemic model parameters: . : Set the ts target trafo epidemic params dict config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts_target_trafo_epidemic_params_dict", + "output": "ts target trafo epidemic params dict config.toml: Dictionary to control Epidemic SEIRD model for de-trending of target per time series group.Note: The target column must correspond to I(t), the infected cases as a function of time.For each training split and time series group, the SEIRD model is fitted to the target signal (by optimizingthe free parameters shown below for each time series group).Then, the SEIRD model's value is subtracted from the training response, and the residuals are passed tothe feature engineering and modeling pipeline. 
For predictions, the SEIRD model's value is added to the residualpredictions from the pipeline, for each time series group.Note: Careful selection of the bounds for the free parameters N, beta, gamma, delta, alpha, rho, lockdown,beta_decay, beta_decay_rate is extremely important for good results.- S(t) : susceptible/healthy/not immune- E(t) : exposed/not yet infectious- I(t) : infectious/active <= target column- R(t) : recovered/immune- D(t) : deceased### Free parameters:- N : total population, N=S+E+I+R+D- beta : rate of exposure (S -> E)- gamma : rate of recovering (I -> R)- delta : incubation period- alpha : fatality rate- rho : rate at which people die- lockdown : day of lockdown (-1 => no lockdown)- beta_decay : beta decay due to lockdown- beta_decay_rate : speed of beta decay### Dynamics:if lockdown >= 0: beta_min = beta * (1 - beta_decay) beta = (beta - beta_min) / (1 + np.exp(-beta_decay_rate * (-t + lockdown))) + beta_mindSdt = -beta * S * I / NdEdt = beta * S * I / N - delta * EdIdt = delta * E - (1 - alpha) * gamma * I - alpha * rho * IdRdt = (1 - alpha) * gamma * IdDdt = alpha * rho * IProvide lower/upper bounds for each parameter you want to control the bounds for. Valid parameters are:N_min, N_max, beta_min, beta_max, gamma_min, gamma_max, delta_min, delta_max, alpha_min, alpha_max,rho_min, rho_max, lockdown_min, lockdown_max, beta_decay_min, beta_decay_max,beta_decay_rate_min, beta_decay_rate_max. You can change any subset of parameters, e.g.,ts_target_trafo_epidemic_params_dict=\"{'N_min': 1000, 'beta_max': 0.2}\"To get SEIR model (in cases where death rates are very low, can speed up calculations significantly):set alpha_min=alpha_max=rho_min=rho_max=beta_decay_rate_min=beta_decay_rate_max=0, lockdown_min=lockdown_max=-1. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts_target_trafo_epidemic_params_dict", + "output": "ts target trafo epidemic params dict config.toml: Custom bounds for SEIRD epidemic model parameters: Dictionary to control Epidemic SEIRD model for de-trending of target per time series group.Note: The target column must correspond to I(t), the infected cases as a function of time.For each training split and time series group, the SEIRD model is fitted to the target signal (by optimizingthe free parameters shown below for each time series group).Then, the SEIRD model's value is subtracted from the training response, and the residuals are passed tothe feature engineering and modeling pipeline. For predictions, the SEIRD model's value is added to the residualpredictions from the pipeline, for each time series group.Note: Careful selection of the bounds for the free parameters N, beta, gamma, delta, alpha, rho, lockdown,beta_decay, beta_decay_rate is extremely important for good results.- S(t) : susceptible/healthy/not immune- E(t) : exposed/not yet infectious- I(t) : infectious/active <= target column- R(t) : recovered/immune- D(t) : deceased### Free parameters:- N : total population, N=S+E+I+R+D- beta : rate of exposure (S -> E)- gamma : rate of recovering (I -> R)- delta : incubation period- alpha : fatality rate- rho : rate at which people die- lockdown : day of lockdown (-1 => no lockdown)- beta_decay : beta decay due to lockdown- beta_decay_rate : speed of beta decay### Dynamics:if lockdown >= 0: beta_min = beta * (1 - beta_decay) beta = (beta - beta_min) / (1 + np.exp(-beta_decay_rate * (-t + lockdown))) + beta_mindSdt = -beta * S * I / NdEdt = beta * S * I / N - delta * EdIdt = delta * E - (1 - alpha) * gamma * I - alpha * rho * IdRdt = (1 - alpha) * gamma * IdDdt = alpha * rho * IProvide lower/upper bounds for each parameter you want to control the bounds for. 
Valid parameters are:N_min, N_max, beta_min, beta_max, gamma_min, gamma_max, delta_min, delta_max, alpha_min, alpha_max,rho_min, rho_max, lockdown_min, lockdown_max, beta_decay_min, beta_decay_max,beta_decay_rate_min, beta_decay_rate_max. You can change any subset of parameters, e.g.,ts_target_trafo_epidemic_params_dict=\"{'N_min': 1000, 'beta_max': 0.2}\"To get SEIR model (in cases where death rates are very low, can speed up calculations significantly):set alpha_min=alpha_max=rho_min=rho_max=beta_decay_rate_min=beta_decay_rate_max=0, lockdown_min=lockdown_max=-1. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts target trafo epidemic params dict", + "output": "ts target trafo epidemic params dict config.toml: Custom bounds for SEIRD epidemic model parameters: Dictionary to control Epidemic SEIRD model for de-trending of target per time series group.Note: The target column must correspond to I(t), the infected cases as a function of time.For each training split and time series group, the SEIRD model is fitted to the target signal (by optimizingthe free parameters shown below for each time series group).Then, the SEIRD model's value is subtracted from the training response, and the residuals are passed tothe feature engineering and modeling pipeline. 
For predictions, the SEIRD model's value is added to the residualpredictions from the pipeline, for each time series group.Note: Careful selection of the bounds for the free parameters N, beta, gamma, delta, alpha, rho, lockdown,beta_decay, beta_decay_rate is extremely important for good results.- S(t) : susceptible/healthy/not immune- E(t) : exposed/not yet infectious- I(t) : infectious/active <= target column- R(t) : recovered/immune- D(t) : deceased### Free parameters:- N : total population, N=S+E+I+R+D- beta : rate of exposure (S -> E)- gamma : rate of recovering (I -> R)- delta : incubation period- alpha : fatality rate- rho : rate at which people die- lockdown : day of lockdown (-1 => no lockdown)- beta_decay : beta decay due to lockdown- beta_decay_rate : speed of beta decay### Dynamics:if lockdown >= 0: beta_min = beta * (1 - beta_decay) beta = (beta - beta_min) / (1 + np.exp(-beta_decay_rate * (-t + lockdown))) + beta_mindSdt = -beta * S * I / NdEdt = beta * S * I / N - delta * EdIdt = delta * E - (1 - alpha) * gamma * I - alpha * rho * IdRdt = (1 - alpha) * gamma * IdDdt = alpha * rho * IProvide lower/upper bounds for each parameter you want to control the bounds for. Valid parameters are:N_min, N_max, beta_min, beta_max, gamma_min, gamma_max, delta_min, delta_max, alpha_min, alpha_max,rho_min, rho_max, lockdown_min, lockdown_max, beta_decay_min, beta_decay_max,beta_decay_rate_min, beta_decay_rate_max. You can change any subset of parameters, e.g.,ts_target_trafo_epidemic_params_dict=\"{'N_min': 1000, 'beta_max': 0.2}\"To get SEIR model (in cases where death rates are very low, can speed up calculations significantly):set alpha_min=alpha_max=rho_min=rho_max=beta_decay_rate_min=beta_decay_rate_max=0, lockdown_min=lockdown_max=-1. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Custom bounds for SEIRD epidemic model parameters: ", + "output": "ts target trafo epidemic params dict config.toml: Custom bounds for SEIRD epidemic model parameters: Dictionary to control Epidemic SEIRD model for de-trending of target per time series group.Note: The target column must correspond to I(t), the infected cases as a function of time.For each training split and time series group, the SEIRD model is fitted to the target signal (by optimizingthe free parameters shown below for each time series group).Then, the SEIRD model's value is subtracted from the training response, and the residuals are passed tothe feature engineering and modeling pipeline. For predictions, the SEIRD model's value is added to the residualpredictions from the pipeline, for each time series group.Note: Careful selection of the bounds for the free parameters N, beta, gamma, delta, alpha, rho, lockdown,beta_decay, beta_decay_rate is extremely important for good results.- S(t) : susceptible/healthy/not immune- E(t) : exposed/not yet infectious- I(t) : infectious/active <= target column- R(t) : recovered/immune- D(t) : deceased### Free parameters:- N : total population, N=S+E+I+R+D- beta : rate of exposure (S -> E)- gamma : rate of recovering (I -> R)- delta : incubation period- alpha : fatality rate- rho : rate at which people die- lockdown : day of lockdown (-1 => no lockdown)- beta_decay : beta decay due to lockdown- beta_decay_rate : speed of beta decay### Dynamics:if lockdown >= 0: beta_min = beta * (1 - beta_decay) beta = (beta - beta_min) / (1 + np.exp(-beta_decay_rate * (-t + lockdown))) + beta_mindSdt = -beta * S * I / NdEdt = beta * S * I / N - delta * EdIdt = delta * E - (1 - alpha) * gamma * I - alpha * rho * IdRdt = (1 - alpha) * gamma * IdDdt = alpha * rho * IProvide lower/upper bounds for each parameter you want to control the bounds for. 
Valid parameters are:N_min, N_max, beta_min, beta_max, gamma_min, gamma_max, delta_min, delta_max, alpha_min, alpha_max,rho_min, rho_max, lockdown_min, lockdown_max, beta_decay_min, beta_decay_max,beta_decay_rate_min, beta_decay_rate_max. You can change any subset of parameters, e.g.,ts_target_trafo_epidemic_params_dict=\"{'N_min': 1000, 'beta_max': 0.2}\"To get SEIR model (in cases where death rates are very low, can speed up calculations significantly):set alpha_min=alpha_max=rho_min=rho_max=beta_decay_rate_min=beta_decay_rate_max=0, lockdown_min=lockdown_max=-1. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ts_target_trafo_epidemic_params_dict", + "output": "ts target trafo epidemic params dict config.toml: Dictionary to control Epidemic SEIRD model for de-trending of target per time series group.Note: The target column must correspond to I(t), the infected cases as a function of time.For each training split and time series group, the SEIRD model is fitted to the target signal (by optimizingthe free parameters shown below for each time series group).Then, the SEIRD model's value is subtracted from the training response, and the residuals are passed tothe feature engineering and modeling pipeline. 
For predictions, the SEIRD model's value is added to the residualpredictions from the pipeline, for each time series group.Note: Careful selection of the bounds for the free parameters N, beta, gamma, delta, alpha, rho, lockdown,beta_decay, beta_decay_rate is extremely important for good results.- S(t) : susceptible/healthy/not immune- E(t) : exposed/not yet infectious- I(t) : infectious/active <= target column- R(t) : recovered/immune- D(t) : deceased### Free parameters:- N : total population, N=S+E+I+R+D- beta : rate of exposure (S -> E)- gamma : rate of recovering (I -> R)- delta : incubation period- alpha : fatality rate- rho : rate at which people die- lockdown : day of lockdown (-1 => no lockdown)- beta_decay : beta decay due to lockdown- beta_decay_rate : speed of beta decay### Dynamics:if lockdown >= 0: beta_min = beta * (1 - beta_decay) beta = (beta - beta_min) / (1 + np.exp(-beta_decay_rate * (-t + lockdown))) + beta_mindSdt = -beta * S * I / NdEdt = beta * S * I / N - delta * EdIdt = delta * E - (1 - alpha) * gamma * I - alpha * rho * IdRdt = (1 - alpha) * gamma * IdDdt = alpha * rho * IProvide lower/upper bounds for each parameter you want to control the bounds for. Valid parameters are:N_min, N_max, beta_min, beta_max, gamma_min, gamma_max, delta_min, delta_max, alpha_min, alpha_max,rho_min, rho_max, lockdown_min, lockdown_max, beta_decay_min, beta_decay_max,beta_decay_rate_min, beta_decay_rate_max. You can change any subset of parameters, e.g.,ts_target_trafo_epidemic_params_dict=\"{'N_min': 1000, 'beta_max': 0.2}\"To get SEIR model (in cases where death rates are very low, can speed up calculations significantly):set alpha_min=alpha_max=rho_min=rho_max=beta_decay_rate_min=beta_decay_rate_max=0, lockdown_min=lockdown_max=-1. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ts_target_trafo_epidemic_params_dict", + "output": "ts target trafo epidemic params dict config.toml: Custom bounds for SEIRD epidemic model parameters: Dictionary to control Epidemic SEIRD model for de-trending of target per time series group.Note: The target column must correspond to I(t), the infected cases as a function of time.For each training split and time series group, the SEIRD model is fitted to the target signal (by optimizingthe free parameters shown below for each time series group).Then, the SEIRD model's value is subtracted from the training response, and the residuals are passed tothe feature engineering and modeling pipeline. For predictions, the SEIRD model's value is added to the residualpredictions from the pipeline, for each time series group.Note: Careful selection of the bounds for the free parameters N, beta, gamma, delta, alpha, rho, lockdown,beta_decay, beta_decay_rate is extremely important for good results.- S(t) : susceptible/healthy/not immune- E(t) : exposed/not yet infectious- I(t) : infectious/active <= target column- R(t) : recovered/immune- D(t) : deceased### Free parameters:- N : total population, N=S+E+I+R+D- beta : rate of exposure (S -> E)- gamma : rate of recovering (I -> R)- delta : incubation period- alpha : fatality rate- rho : rate at which people die- lockdown : day of lockdown (-1 => no lockdown)- beta_decay : beta decay due to lockdown- beta_decay_rate : speed of beta decay### Dynamics:if lockdown >= 0: beta_min = beta * (1 - beta_decay) beta = (beta - beta_min) / (1 + np.exp(-beta_decay_rate * (-t + lockdown))) + beta_mindSdt = -beta * S * I / NdEdt = beta * S * I / N - delta * EdIdt = delta * E - (1 - alpha) * gamma * I - alpha * rho * IdRdt = (1 - alpha) * gamma * IdDdt = alpha * rho * IProvide lower/upper bounds for each parameter you want to control the bounds for. 
Valid parameters are:N_min, N_max, beta_min, beta_max, gamma_min, gamma_max, delta_min, delta_max, alpha_min, alpha_max,rho_min, rho_max, lockdown_min, lockdown_max, beta_decay_min, beta_decay_max,beta_decay_rate_min, beta_decay_rate_max. You can change any subset of parameters, e.g.,ts_target_trafo_epidemic_params_dict=\"{'N_min': 1000, 'beta_max': 0.2}\"To get SEIR model (in cases where death rates are very low, can speed up calculations significantly):set alpha_min=alpha_max=rho_min=rho_max=beta_decay_rate_min=beta_decay_rate_max=0, lockdown_min=lockdown_max=-1. " + }, + { + "prompt_type": "plain", + "instruction": ": What does ts_target_trafo_epidemic_target do? : ts target trafo epidemic target config.toml: Which SEIRD model component the target column corresponds to: I: Infected, R: Recovered, D: Deceased.: " + }, + { + "prompt_type": "plain", + "instruction": ": Explain ts_target_trafo_epidemic_target. : ts target trafo epidemic target config.toml: Which SEIRD model component the target column corresponds to: I: Infected, R: Recovered, D: Deceased.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts_target_trafo_epidemic_target", + "output": "ts target trafo epidemic target config.toml: Which SEIRD model component the target column corresponds to: I: Infected, R: Recovered, D: Deceased.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts_target_trafo_epidemic_target", + "output": "ts target trafo epidemic target config.toml: Which SEIRD model component the target column corresponds to: I: Infected, R: Recovered, D: Deceased.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts target trafo epidemic target", + "output": "ts target trafo epidemic target config.toml: Which SEIRD model component the target column corresponds 
to: I: Infected, R: Recovered, D: Deceased.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Which SEIRD model component the target column corresponds to: I: Infected, R: Recovered, D: Deceased.: ", + "output": "ts target trafo epidemic target config.toml: Which SEIRD model component the target column corresponds to: I: Infected, R: Recovered, D: Deceased.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ts_target_trafo_epidemic_target", + "output": "ts target trafo epidemic target config.toml: Which SEIRD model component the target column corresponds to: I: Infected, R: Recovered, D: Deceased.: " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ts_target_trafo_epidemic_target", + "output": "ts target trafo epidemic target config.toml: Which SEIRD model component the target column corresponds to: I: Infected, R: Recovered, D: Deceased.: " + }, + { + "prompt_type": "plain", + "instruction": ": What does ts_lag_target_trafo do? : ts lag target trafo config.toml: Time series lag-based target transformation. One can choose between difference and ratio of the current and a lagged target. The corresponding lag size can be set via 'Target transformation lag size'. Note: Can be cascaded with 'Time series target transformation', but is mutually exclusive with regular target transformations." + }, + { + "prompt_type": "plain", + "instruction": ": Explain ts_lag_target_trafo. : ts lag target trafo config.toml: Time series lag-based target transformation. One can choose between difference and ratio of the current and a lagged target. The corresponding lag size can be set via 'Target transformation lag size'. Note: Can be cascaded with 'Time series target transformation', but is mutually exclusive with regular target transformations." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Time series lag-based target transformation: . : Set the ts lag target trafo config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts_lag_target_trafo", + "output": "ts lag target trafo config.toml: Time series lag-based target transformation. One can choose between difference and ratio of the current and a lagged target. The corresponding lag size can be set via 'Target transformation lag size'. Note: Can be cascaded with 'Time series target transformation', but is mutually exclusive with regular target transformations." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts_lag_target_trafo", + "output": "ts lag target trafo config.toml: Time series lag-based target transformation: Time series lag-based target transformation. One can choose between difference and ratio of the current and a lagged target. The corresponding lag size can be set via 'Target transformation lag size'. Note: Can be cascaded with 'Time series target transformation', but is mutually exclusive with regular target transformations." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts lag target trafo", + "output": "ts lag target trafo config.toml: Time series lag-based target transformation: Time series lag-based target transformation. One can choose between difference and ratio of the current and a lagged target. The corresponding lag size can be set via 'Target transformation lag size'. Note: Can be cascaded with 'Time series target transformation', but is mutually exclusive with regular target transformations." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Time series lag-based target transformation: ", + "output": "ts lag target trafo config.toml: Time series lag-based target transformation: Time series lag-based target transformation. One can choose between difference and ratio of the current and a lagged target. The corresponding lag size can be set via 'Target transformation lag size'. Note: Can be cascaded with 'Time series target transformation', but is mutually exclusive with regular target transformations." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ts_lag_target_trafo", + "output": "ts lag target trafo config.toml: Time series lag-based target transformation. One can choose between difference and ratio of the current and a lagged target. The corresponding lag size can be set via 'Target transformation lag size'. Note: Can be cascaded with 'Time series target transformation', but is mutually exclusive with regular target transformations." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ts_lag_target_trafo", + "output": "ts lag target trafo config.toml: Time series lag-based target transformation: Time series lag-based target transformation. One can choose between difference and ratio of the current and a lagged target. The corresponding lag size can be set via 'Target transformation lag size'. Note: Can be cascaded with 'Time series target transformation', but is mutually exclusive with regular target transformations." + }, + { + "prompt_type": "plain", + "instruction": ": What does ts_target_trafo_lag_size do? : ts target trafo lag size config.toml: Lag size used for time series target transformation. See setting 'Time series lag-based target transformation'. -1 => smallest valid value = prediction periods + gap (automatically adjusted by DAI if too small)." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain ts_target_trafo_lag_size. : ts target trafo lag size config.toml: Lag size used for time series target transformation. See setting 'Time series lag-based target transformation'. -1 => smallest valid value = prediction periods + gap (automatically adjusted by DAI if too small)." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Lag size used for time series target transformation: . : Set the ts target trafo lag size config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts_target_trafo_lag_size", + "output": "ts target trafo lag size config.toml: Lag size used for time series target transformation. See setting 'Time series lag-based target transformation'. -1 => smallest valid value = prediction periods + gap (automatically adjusted by DAI if too small)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts_target_trafo_lag_size", + "output": "ts target trafo lag size config.toml: Lag size used for time series target transformation: Lag size used for time series target transformation. See setting 'Time series lag-based target transformation'. -1 => smallest valid value = prediction periods + gap (automatically adjusted by DAI if too small)." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ts target trafo lag size", + "output": "ts target trafo lag size config.toml: Lag size used for time series target transformation: Lag size used for time series target transformation. See setting 'Time series lag-based target transformation'. -1 => smallest valid value = prediction periods + gap (automatically adjusted by DAI if too small)." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Lag size used for time series target transformation: ", + "output": "ts target trafo lag size config.toml: Lag size used for time series target transformation: Lag size used for time series target transformation. See setting 'Time series lag-based target transformation'. -1 => smallest valid value = prediction periods + gap (automatically adjusted by DAI if too small)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ts_target_trafo_lag_size", + "output": "ts target trafo lag size config.toml: Lag size used for time series target transformation. See setting 'Time series lag-based target transformation'. -1 => smallest valid value = prediction periods + gap (automatically adjusted by DAI if too small)." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ts_target_trafo_lag_size", + "output": "ts target trafo lag size config.toml: Lag size used for time series target transformation: Lag size used for time series target transformation. See setting 'Time series lag-based target transformation'. -1 => smallest valid value = prediction periods + gap (automatically adjusted by DAI if too small)." + }, + { + "prompt_type": "plain", + "instruction": ": What does tgc_via_ui_max_ncols do? : tgc via ui max ncols config.toml: Maximum number of columns sent from UI to backend in order to auto-detect TGC" + }, + { + "prompt_type": "plain", + "instruction": ": Explain tgc_via_ui_max_ncols. 
: tgc via ui max ncols config.toml: Maximum number of columns sent from UI to backend in order to auto-detect TGC" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc_via_ui_max_ncols", + "output": "tgc via ui max ncols config.toml: Maximum number of columns sent from UI to backend in order to auto-detect TGC" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc_via_ui_max_ncols", + "output": "tgc via ui max ncols config.toml: Maximum number of columns sent from UI to backend in order to auto-detect TGC" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc via ui max ncols", + "output": "tgc via ui max ncols config.toml: Maximum number of columns sent from UI to backend in order to auto-detect TGC" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tgc via ui max ncols config.toml: Maximum number of columns sent from UI to backend in order to auto-detect TGC" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tgc_via_ui_max_ncols", + "output": "tgc via ui max ncols config.toml: Maximum number of columns sent from UI to backend in order to auto-detect TGC" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tgc_via_ui_max_ncols", + "output": "tgc via ui max ncols config.toml: Maximum number of columns sent from UI to backend in order to auto-detect TGC" + }, + { + "prompt_type": "plain", + "instruction": ": What does tgc_dup_tolerance do? : tgc dup tolerance config.toml: Maximum frequency of duplicated timestamps for TGC detection" + }, + { + "prompt_type": "plain", + "instruction": ": Explain tgc_dup_tolerance. 
: tgc dup tolerance config.toml: Maximum frequency of duplicated timestamps for TGC detection" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc_dup_tolerance", + "output": "tgc dup tolerance config.toml: Maximum frequency of duplicated timestamps for TGC detection" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc_dup_tolerance", + "output": "tgc dup tolerance config.toml: Maximum frequency of duplicated timestamps for TGC detection" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "tgc dup tolerance", + "output": "tgc dup tolerance config.toml: Maximum frequency of duplicated timestamps for TGC detection" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "tgc dup tolerance config.toml: Maximum frequency of duplicated timestamps for TGC detection" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting tgc_dup_tolerance", + "output": "tgc dup tolerance config.toml: Maximum frequency of duplicated timestamps for TGC detection" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting tgc_dup_tolerance", + "output": "tgc dup tolerance config.toml: Maximum frequency of duplicated timestamps for TGC detection" + }, + { + "prompt_type": "plain", + "instruction": ": What does timeseries_split_suggestion_timeout do? : timeseries split suggestion timeout config.toml: Timeout in seconds for time-series properties detection in UI." + }, + { + "prompt_type": "plain", + "instruction": ": Explain timeseries_split_suggestion_timeout. : timeseries split suggestion timeout config.toml: Timeout in seconds for time-series properties detection in UI." 
+ }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Timeout in seconds for time-series properties detection in UI.: . : Set the timeseries split suggestion timeout config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "timeseries_split_suggestion_timeout", + "output": "timeseries split suggestion timeout config.toml: Timeout in seconds for time-series properties detection in UI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "timeseries_split_suggestion_timeout", + "output": "timeseries split suggestion timeout config.toml: Timeout in seconds for time-series properties detection in UI.: Timeout in seconds for time-series properties detection in UI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "timeseries split suggestion timeout", + "output": "timeseries split suggestion timeout config.toml: Timeout in seconds for time-series properties detection in UI.: Timeout in seconds for time-series properties detection in UI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Timeout in seconds for time-series properties detection in UI.: ", + "output": "timeseries split suggestion timeout config.toml: Timeout in seconds for time-series properties detection in UI.: Timeout in seconds for time-series properties detection in UI." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting timeseries_split_suggestion_timeout", + "output": "timeseries split suggestion timeout config.toml: Timeout in seconds for time-series properties detection in UI." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting timeseries_split_suggestion_timeout", + "output": "timeseries split suggestion timeout config.toml: Timeout in seconds for time-series properties detection in UI.: Timeout in seconds for time-series properties detection in UI." + }, + { + "prompt_type": "plain", + "instruction": ": What does timeseries_recency_weight_power do? : timeseries recency weight power config.toml: Weight TS models scores as split number to this power. E.g. Use 1.0 to weight split closest to horizon by a factor that is number of splits larger than oldest split. Applies to tuning models and final back-testing models. If 0.0 (default) is used, median function is used, else mean is used. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain timeseries_recency_weight_power. : timeseries recency weight power config.toml: Weight TS models scores as split number to this power. E.g. Use 1.0 to weight split closest to horizon by a factor that is number of splits larger than oldest split. Applies to tuning models and final back-testing models. If 0.0 (default) is used, median function is used, else mean is used. " + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Power of recency weight for TS splits: . : Set the timeseries recency weight power config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "timeseries_recency_weight_power", + "output": "timeseries recency weight power config.toml: Weight TS models scores as split number to this power. E.g. Use 1.0 to weight split closest to horizon by a factor that is number of splits larger than oldest split. Applies to tuning models and final back-testing models. If 0.0 (default) is used, median function is used, else mean is used. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "timeseries_recency_weight_power", + "output": "timeseries recency weight power config.toml: Power of recency weight for TS splits: Weight TS models scores as split number to this power. E.g. Use 1.0 to weight split closest to horizon by a factor that is number of splits larger than oldest split. Applies to tuning models and final back-testing models. If 0.0 (default) is used, median function is used, else mean is used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "timeseries recency weight power", + "output": "timeseries recency weight power config.toml: Power of recency weight for TS splits: Weight TS models scores as split number to this power. E.g. Use 1.0 to weight split closest to horizon by a factor that is number of splits larger than oldest split. Applies to tuning models and final back-testing models. If 0.0 (default) is used, median function is used, else mean is used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Power of recency weight for TS splits: ", + "output": "timeseries recency weight power config.toml: Power of recency weight for TS splits: Weight TS models scores as split number to this power. E.g. Use 1.0 to weight split closest to horizon by a factor that is number of splits larger than oldest split. Applies to tuning models and final back-testing models. If 0.0 (default) is used, median function is used, else mean is used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting timeseries_recency_weight_power", + "output": "timeseries recency weight power config.toml: Weight TS models scores as split number to this power. E.g. 
Use 1.0 to weight split closest to horizon by a factor that is number of splits larger than oldest split. Applies to tuning models and final back-testing models. If 0.0 (default) is used, median function is used, else mean is used. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting timeseries_recency_weight_power", + "output": "timeseries recency weight power config.toml: Power of recency weight for TS splits: Weight TS models scores as split number to this power. E.g. Use 1.0 to weight split closest to horizon by a factor that is number of splits larger than oldest split. Applies to tuning models and final back-testing models. If 0.0 (default) is used, median function is used, else mean is used. " + }, + { + "prompt_type": "plain", + "instruction": ": What does user_config_directory do? : user config directory config.toml: Every *.toml file is read from this directory and processed the same way as the main config file." + }, + { + "prompt_type": "plain", + "instruction": ": Explain user_config_directory. : user config directory config.toml: Every *.toml file is read from this directory and processed the same way as the main config file." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "user_config_directory", + "output": "user config directory config.toml: Every *.toml file is read from this directory and processed the same way as the main config file." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "user_config_directory", + "output": "user config directory config.toml: Every *.toml file is read from this directory and processed the same way as the main config file." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "user config directory", + "output": "user config directory config.toml: Every *.toml file is read from this directory and processed the same way as the main config file." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "user config directory config.toml: Every *.toml file is read from this directory and processed the same way as the main config file." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting user_config_directory", + "output": "user config directory config.toml: Every *.toml file is read from this directory and processed the same way as the main config file." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting user_config_directory", + "output": "user config directory config.toml: Every *.toml file is read from this directory and processed the same way as the main config file." + }, + { + "prompt_type": "plain", + "instruction": ": What does procsy_ip do? : procsy ip config.toml: IP address and port of procsy process." + }, + { + "prompt_type": "plain", + "instruction": ": Explain procsy_ip. : procsy ip config.toml: IP address and port of procsy process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "procsy_ip", + "output": "procsy ip config.toml: IP address and port of procsy process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "procsy_ip", + "output": "procsy ip config.toml: IP address and port of procsy process." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "procsy ip", + "output": "procsy ip config.toml: IP address and port of procsy process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "procsy ip config.toml: IP address and port of procsy process." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting procsy_ip", + "output": "procsy ip config.toml: IP address and port of procsy process." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting procsy_ip", + "output": "procsy ip config.toml: IP address and port of procsy process." + }, + { + "prompt_type": "plain", + "instruction": ": What does procsy_port do? : procsy port config.toml: IP address and port of procsy process." + }, + { + "prompt_type": "plain", + "instruction": ": Explain procsy_port. : procsy port config.toml: IP address and port of procsy process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "procsy_port", + "output": "procsy port config.toml: IP address and port of procsy process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "procsy_port", + "output": "procsy port config.toml: IP address and port of procsy process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "procsy port", + "output": "procsy port config.toml: IP address and port of procsy process." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "procsy port config.toml: IP address and port of procsy process." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting procsy_port", + "output": "procsy port config.toml: IP address and port of procsy process." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting procsy_port", + "output": "procsy port config.toml: IP address and port of procsy process." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_ip do? : h2o ip config.toml: IP address for use by MLI." + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_ip. : h2o ip config.toml: IP address for use by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_ip", + "output": "h2o ip config.toml: IP address for use by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_ip", + "output": "h2o ip config.toml: IP address for use by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o ip", + "output": "h2o ip config.toml: IP address for use by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o ip config.toml: IP address for use by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_ip", + "output": "h2o ip config.toml: IP address for use by MLI." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_ip", + "output": "h2o ip config.toml: IP address for use by MLI." + }, + { + "prompt_type": "plain", + "instruction": ": What does h2o_port do? : h2o port config.toml: Port of H2O instance for use by MLI. 
Each H2O node has an internal port (web port+1, so by default port 12349) for internal node-to-node communication" + }, + { + "prompt_type": "plain", + "instruction": ": Explain h2o_port. : h2o port config.toml: Port of H2O instance for use by MLI. Each H2O node has an internal port (web port+1, so by default port 12349) for internal node-to-node communication" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_port", + "output": "h2o port config.toml: Port of H2O instance for use by MLI. Each H2O node has an internal port (web port+1, so by default port 12349) for internal node-to-node communication" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o_port", + "output": "h2o port config.toml: Port of H2O instance for use by MLI. Each H2O node has an internal port (web port+1, so by default port 12349) for internal node-to-node communication" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "h2o port", + "output": "h2o port config.toml: Port of H2O instance for use by MLI. Each H2O node has an internal port (web port+1, so by default port 12349) for internal node-to-node communication" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "h2o port config.toml: Port of H2O instance for use by MLI. Each H2O node has an internal port (web port+1, so by default port 12349) for internal node-to-node communication" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting h2o_port", + "output": "h2o port config.toml: Port of H2O instance for use by MLI. 
Each H2O node has an internal port (web port+1, so by default port 12349) for internal node-to-node communication" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting h2o_port", + "output": "h2o port config.toml: Port of H2O instance for use by MLI. Each H2O node has an internal port (web port+1, so by default port 12349) for internal node-to-node communication" + }, + { + "prompt_type": "plain", + "instruction": ": What does ip do? : ip config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "plain", + "instruction": ": Explain ip. : ip config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ip", + "output": "ip config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ip", + "output": "ip config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "ip", + "output": "ip config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "ip config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting ip", + "output": "ip config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting ip", + "output": "ip config.toml: IP address and port for Driverless AI HTTP server." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does port do? : port config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "plain", + "instruction": ": Explain port. : port config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "port", + "output": "port config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "port", + "output": "port config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "port", + "output": "port config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "port config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting port", + "output": "port config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting port", + "output": "port config.toml: IP address and port for Driverless AI HTTP server." + }, + { + "prompt_type": "plain", + "instruction": ": What does port_range do? : port range config.toml: A list of two integers indicating the port range to search over, and dynamically find an open port to bind to (e.g., [11111,20000])." + }, + { + "prompt_type": "plain", + "instruction": ": Explain port_range. 
: port range config.toml: A list of two integers indicating the port range to search over, and dynamically find an open port to bind to (e.g., [11111,20000])." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "port_range", + "output": "port range config.toml: A list of two integers indicating the port range to search over, and dynamically find an open port to bind to (e.g., [11111,20000])." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "port_range", + "output": "port range config.toml: A list of two integers indicating the port range to search over, and dynamically find an open port to bind to (e.g., [11111,20000])." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "port range", + "output": "port range config.toml: A list of two integers indicating the port range to search over, and dynamically find an open port to bind to (e.g., [11111,20000])." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "port range config.toml: A list of two integers indicating the port range to search over, and dynamically find an open port to bind to (e.g., [11111,20000])." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting port_range", + "output": "port range config.toml: A list of two integers indicating the port range to search over, and dynamically find an open port to bind to (e.g., [11111,20000])." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting port_range", + "output": "port range config.toml: A list of two integers indicating the port range to search over, and dynamically find an open port to bind to (e.g., [11111,20000])." 
+ }, + { + "prompt_type": "plain", + "instruction": ": What does strict_version_check do? : strict version check config.toml: Strict version check for DAI" + }, + { + "prompt_type": "plain", + "instruction": ": Explain strict_version_check. : strict version check config.toml: Strict version check for DAI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "strict_version_check", + "output": "strict version check config.toml: Strict version check for DAI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "strict_version_check", + "output": "strict version check config.toml: Strict version check for DAI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "strict version check", + "output": "strict version check config.toml: Strict version check for DAI" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "strict version check config.toml: Strict version check for DAI" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting strict_version_check", + "output": "strict version check config.toml: Strict version check for DAI" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting strict_version_check", + "output": "strict version check config.toml: Strict version check for DAI" + }, + { + "prompt_type": "plain", + "instruction": ": What does max_file_upload_size do? : max file upload size config.toml: File upload limit (default 100GB)" + }, + { + "prompt_type": "plain", + "instruction": ": Explain max_file_upload_size. 
: max file upload size config.toml: File upload limit (default 100GB)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_file_upload_size", + "output": "max file upload size config.toml: File upload limit (default 100GB)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max_file_upload_size", + "output": "max file upload size config.toml: File upload limit (default 100GB)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "max file upload size", + "output": "max file upload size config.toml: File upload limit (default 100GB)" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "max file upload size config.toml: File upload limit (default 100GB)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting max_file_upload_size", + "output": "max file upload size config.toml: File upload limit (default 100GB)" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting max_file_upload_size", + "output": "max file upload size config.toml: File upload limit (default 100GB)" + }, + { + "prompt_type": "plain", + "instruction": ": What does data_directory do? : data directory config.toml: Data directory. All application data and files related datasets and experiments are stored in this directory." + }, + { + "prompt_type": "plain", + "instruction": ": Explain data_directory. : data directory config.toml: Data directory. All application data and files related datasets and experiments are stored in this directory." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_directory", + "output": "data directory config.toml: Data directory. All application data and files related datasets and experiments are stored in this directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_directory", + "output": "data directory config.toml: Data directory. All application data and files related datasets and experiments are stored in this directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data directory", + "output": "data directory config.toml: Data directory. All application data and files related datasets and experiments are stored in this directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "data directory config.toml: Data directory. All application data and files related datasets and experiments are stored in this directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting data_directory", + "output": "data directory config.toml: Data directory. All application data and files related datasets and experiments are stored in this directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting data_directory", + "output": "data directory config.toml: Data directory. All application data and files related datasets and experiments are stored in this directory." + }, + { + "prompt_type": "plain", + "instruction": ": What does datasets_directory do? : datasets directory config.toml: Datasets directory. 
If set, it will denote the location from which all datasets will be read from and written into, typically this location shall be configured to be on an external file system to allow for a more granular control to just the datasets volume. If empty then will default to data_directory." + }, + { + "prompt_type": "plain", + "instruction": ": Explain datasets_directory. : datasets directory config.toml: Datasets directory. If set, it will denote the location from which all datasets will be read from and written into, typically this location shall be configured to be on an external file system to allow for a more granular control to just the datasets volume. If empty then will default to data_directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datasets_directory", + "output": "datasets directory config.toml: Datasets directory. If set, it will denote the location from which all datasets will be read from and written into, typically this location shall be configured to be on an external file system to allow for a more granular control to just the datasets volume. If empty then will default to data_directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datasets_directory", + "output": "datasets directory config.toml: Datasets directory. If set, it will denote the location from which all datasets will be read from and written into, typically this location shall be configured to be on an external file system to allow for a more granular control to just the datasets volume. If empty then will default to data_directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "datasets directory", + "output": "datasets directory config.toml: Datasets directory. 
If set, it will denote the location from which all datasets will be read from and written into, typically this location shall be configured to be on an external file system to allow for a more granular control to just the datasets volume. If empty then will default to data_directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "datasets directory config.toml: Datasets directory. If set, it will denote the location from which all datasets will be read from and written into, typically this location shall be configured to be on an external file system to allow for a more granular control to just the datasets volume. If empty then will default to data_directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting datasets_directory", + "output": "datasets directory config.toml: Datasets directory. If set, it will denote the location from which all datasets will be read from and written into, typically this location shall be configured to be on an external file system to allow for a more granular control to just the datasets volume. If empty then will default to data_directory." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting datasets_directory", + "output": "datasets directory config.toml: Datasets directory. If set, it will denote the location from which all datasets will be read from and written into, typically this location shall be configured to be on an external file system to allow for a more granular control to just the datasets volume. If empty then will default to data_directory." + }, + { + "prompt_type": "plain", + "instruction": ": What does data_connectors_logs_directory do? : data connectors logs directory config.toml: Path to the directory where the logs of HDFS, Hive, JDBC, and KDB+ data connectors will be saved." 
+ }, + { + "prompt_type": "plain", + "instruction": ": Explain data_connectors_logs_directory. : data connectors logs directory config.toml: Path to the directory where the logs of HDFS, Hive, JDBC, and KDB+ data connectors will be saved." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_connectors_logs_directory", + "output": "data connectors logs directory config.toml: Path to the directory where the logs of HDFS, Hive, JDBC, and KDB+ data connectors will be saved." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data_connectors_logs_directory", + "output": "data connectors logs directory config.toml: Path to the directory where the logs of HDFS, Hive, JDBC, and KDB+ data connectors will be saved." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "data connectors logs directory", + "output": "data connectors logs directory config.toml: Path to the directory where the logs of HDFS, Hive, JDBC, and KDB+ data connectors will be saved." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "data connectors logs directory config.toml: Path to the directory where the logs of HDFS, Hive, JDBC, and KDB+ data connectors will be saved." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting data_connectors_logs_directory", + "output": "data connectors logs directory config.toml: Path to the directory where the logs of HDFS, Hive, JDBC, and KDB+ data connectors will be saved." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting data_connectors_logs_directory", + "output": "data connectors logs directory config.toml: Path to the directory where the logs of HDFS, Hive, JDBC, and KDB+ data connectors will be saved." + }, + { + "prompt_type": "plain", + "instruction": ": What does server_logs_sub_directory do? : server logs sub directory config.toml: Subdirectory within data_directory to store server logs." + }, + { + "prompt_type": "plain", + "instruction": ": Explain server_logs_sub_directory. : server logs sub directory config.toml: Subdirectory within data_directory to store server logs." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "server_logs_sub_directory", + "output": "server logs sub directory config.toml: Subdirectory within data_directory to store server logs." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "server_logs_sub_directory", + "output": "server logs sub directory config.toml: Subdirectory within data_directory to store server logs." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "server logs sub directory", + "output": "server logs sub directory config.toml: Subdirectory within data_directory to store server logs." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "server logs sub directory config.toml: Subdirectory within data_directory to store server logs." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting server_logs_sub_directory", + "output": "server logs sub directory config.toml: Subdirectory within data_directory to store server logs." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting server_logs_sub_directory", + "output": "server logs sub directory config.toml: Subdirectory within data_directory to store server logs." + }, + { + "prompt_type": "plain", + "instruction": ": What does pid_sub_directory do? : pid sub directory config.toml: Subdirectory within data_directory to store pid files for controlling kill/stop of DAI servers." + }, + { + "prompt_type": "plain", + "instruction": ": Explain pid_sub_directory. : pid sub directory config.toml: Subdirectory within data_directory to store pid files for controlling kill/stop of DAI servers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pid_sub_directory", + "output": "pid sub directory config.toml: Subdirectory within data_directory to store pid files for controlling kill/stop of DAI servers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pid_sub_directory", + "output": "pid sub directory config.toml: Subdirectory within data_directory to store pid files for controlling kill/stop of DAI servers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "pid sub directory", + "output": "pid sub directory config.toml: Subdirectory within data_directory to store pid files for controlling kill/stop of DAI servers." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "pid sub directory config.toml: Subdirectory within data_directory to store pid files for controlling kill/stop of DAI servers." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting pid_sub_directory", + "output": "pid sub directory config.toml: Subdirectory within data_directory to store pid files for controlling kill/stop of DAI servers." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting pid_sub_directory", + "output": "pid sub directory config.toml: Subdirectory within data_directory to store pid files for controlling kill/stop of DAI servers." + }, + { + "prompt_type": "plain", + "instruction": ": What does mapr_tickets_directory do? : mapr tickets directory config.toml: Path to the directory which will be use to save MapR tickets when MapR multi-user mode is enabled. This is applicable only when enable_mapr_multi_user_mode is set to true. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain mapr_tickets_directory. : mapr tickets directory config.toml: Path to the directory which will be use to save MapR tickets when MapR multi-user mode is enabled. This is applicable only when enable_mapr_multi_user_mode is set to true. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mapr_tickets_directory", + "output": "mapr tickets directory config.toml: Path to the directory which will be use to save MapR tickets when MapR multi-user mode is enabled. This is applicable only when enable_mapr_multi_user_mode is set to true. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mapr_tickets_directory", + "output": "mapr tickets directory config.toml: Path to the directory which will be use to save MapR tickets when MapR multi-user mode is enabled. This is applicable only when enable_mapr_multi_user_mode is set to true. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mapr tickets directory", + "output": "mapr tickets directory config.toml: Path to the directory which will be use to save MapR tickets when MapR multi-user mode is enabled. This is applicable only when enable_mapr_multi_user_mode is set to true. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mapr tickets directory config.toml: Path to the directory which will be use to save MapR tickets when MapR multi-user mode is enabled. This is applicable only when enable_mapr_multi_user_mode is set to true. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mapr_tickets_directory", + "output": "mapr tickets directory config.toml: Path to the directory which will be use to save MapR tickets when MapR multi-user mode is enabled. This is applicable only when enable_mapr_multi_user_mode is set to true. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mapr_tickets_directory", + "output": "mapr tickets directory config.toml: Path to the directory which will be use to save MapR tickets when MapR multi-user mode is enabled. This is applicable only when enable_mapr_multi_user_mode is set to true. " + }, + { + "prompt_type": "plain", + "instruction": ": What does mapr_tickets_duration_minutes do? : mapr tickets duration minutes config.toml: MapR tickets duration in minutes, if set to -1, it will use the default value (not specified in maprlogin command), otherwise will be the specified configuration value but no less than one day. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain mapr_tickets_duration_minutes. 
: mapr tickets duration minutes config.toml: MapR tickets duration in minutes, if set to -1, it will use the default value (not specified in maprlogin command), otherwise will be the specified configuration value but no less than one day. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mapr_tickets_duration_minutes", + "output": "mapr tickets duration minutes config.toml: MapR tickets duration in minutes, if set to -1, it will use the default value (not specified in maprlogin command), otherwise will be the specified configuration value but no less than one day. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mapr_tickets_duration_minutes", + "output": "mapr tickets duration minutes config.toml: MapR tickets duration in minutes, if set to -1, it will use the default value (not specified in maprlogin command), otherwise will be the specified configuration value but no less than one day. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "mapr tickets duration minutes", + "output": "mapr tickets duration minutes config.toml: MapR tickets duration in minutes, if set to -1, it will use the default value (not specified in maprlogin command), otherwise will be the specified configuration value but no less than one day. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "mapr tickets duration minutes config.toml: MapR tickets duration in minutes, if set to -1, it will use the default value (not specified in maprlogin command), otherwise will be the specified configuration value but no less than one day. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting mapr_tickets_duration_minutes", + "output": "mapr tickets duration minutes config.toml: MapR tickets duration in minutes, if set to -1, it will use the default value (not specified in maprlogin command), otherwise will be the specified configuration value but no less than one day. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting mapr_tickets_duration_minutes", + "output": "mapr tickets duration minutes config.toml: MapR tickets duration in minutes, if set to -1, it will use the default value (not specified in maprlogin command), otherwise will be the specified configuration value but no less than one day. " + }, + { + "prompt_type": "plain", + "instruction": ": What does remove_uploads_temp_files_server_start do? : remove uploads temp files server start config.toml: Whether at server start to delete all temporary uploaded files, left over from failed uploads. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain remove_uploads_temp_files_server_start. : remove uploads temp files server start config.toml: Whether at server start to delete all temporary uploaded files, left over from failed uploads. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "remove_uploads_temp_files_server_start", + "output": "remove uploads temp files server start config.toml: Whether at server start to delete all temporary uploaded files, left over from failed uploads. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "remove_uploads_temp_files_server_start", + "output": "remove uploads temp files server start config.toml: Whether at server start to delete all temporary uploaded files, left over from failed uploads. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "remove uploads temp files server start", + "output": "remove uploads temp files server start config.toml: Whether at server start to delete all temporary uploaded files, left over from failed uploads. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "remove uploads temp files server start config.toml: Whether at server start to delete all temporary uploaded files, left over from failed uploads. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting remove_uploads_temp_files_server_start", + "output": "remove uploads temp files server start config.toml: Whether at server start to delete all temporary uploaded files, left over from failed uploads. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting remove_uploads_temp_files_server_start", + "output": "remove uploads temp files server start config.toml: Whether at server start to delete all temporary uploaded files, left over from failed uploads. " + }, + { + "prompt_type": "plain", + "instruction": ": What does remove_temp_files_server_start do? : remove temp files server start config.toml: Whether to run through entire data directory and remove all temporary files. Can lead to slow start-up time if have large number (much greater than 100) of experiments. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain remove_temp_files_server_start. : remove temp files server start config.toml: Whether to run through entire data directory and remove all temporary files. Can lead to slow start-up time if have large number (much greater than 100) of experiments. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "remove_temp_files_server_start", + "output": "remove temp files server start config.toml: Whether to run through entire data directory and remove all temporary files. Can lead to slow start-up time if have large number (much greater than 100) of experiments. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "remove_temp_files_server_start", + "output": "remove temp files server start config.toml: Whether to run through entire data directory and remove all temporary files. Can lead to slow start-up time if have large number (much greater than 100) of experiments. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "remove temp files server start", + "output": "remove temp files server start config.toml: Whether to run through entire data directory and remove all temporary files. Can lead to slow start-up time if have large number (much greater than 100) of experiments. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "remove temp files server start config.toml: Whether to run through entire data directory and remove all temporary files. Can lead to slow start-up time if have large number (much greater than 100) of experiments. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting remove_temp_files_server_start", + "output": "remove temp files server start config.toml: Whether to run through entire data directory and remove all temporary files. Can lead to slow start-up time if have large number (much greater than 100) of experiments. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting remove_temp_files_server_start", + "output": "remove temp files server start config.toml: Whether to run through entire data directory and remove all temporary files. Can lead to slow start-up time if have large number (much greater than 100) of experiments. " + }, + { + "prompt_type": "plain", + "instruction": ": What does remove_temp_files_aborted_experiments do? : remove temp files aborted experiments config.toml: Whether to delete temporary files after experiment is aborted/cancelled. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain remove_temp_files_aborted_experiments. : remove temp files aborted experiments config.toml: Whether to delete temporary files after experiment is aborted/cancelled. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "remove_temp_files_aborted_experiments", + "output": "remove temp files aborted experiments config.toml: Whether to delete temporary files after experiment is aborted/cancelled. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "remove_temp_files_aborted_experiments", + "output": "remove temp files aborted experiments config.toml: Whether to delete temporary files after experiment is aborted/cancelled. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "remove temp files aborted experiments", + "output": "remove temp files aborted experiments config.toml: Whether to delete temporary files after experiment is aborted/cancelled. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "remove temp files aborted experiments config.toml: Whether to delete temporary files after experiment is aborted/cancelled. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting remove_temp_files_aborted_experiments", + "output": "remove temp files aborted experiments config.toml: Whether to delete temporary files after experiment is aborted/cancelled. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting remove_temp_files_aborted_experiments", + "output": "remove temp files aborted experiments config.toml: Whether to delete temporary files after experiment is aborted/cancelled. " + }, + { + "prompt_type": "plain", + "instruction": ": What does usage_stats_opt_in do? : usage stats opt in config.toml: Whether to opt in to usage statistics and bug reporting" + }, + { + "prompt_type": "plain", + "instruction": ": Explain usage_stats_opt_in. 
: usage stats opt in config.toml: Whether to opt in to usage statistics and bug reporting" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "usage_stats_opt_in", + "output": "usage stats opt in config.toml: Whether to opt in to usage statistics and bug reporting" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "usage_stats_opt_in", + "output": "usage stats opt in config.toml: Whether to opt in to usage statistics and bug reporting" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "usage stats opt in", + "output": "usage stats opt in config.toml: Whether to opt in to usage statistics and bug reporting" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "usage stats opt in config.toml: Whether to opt in to usage statistics and bug reporting" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting usage_stats_opt_in", + "output": "usage stats opt in config.toml: Whether to opt in to usage statistics and bug reporting" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting usage_stats_opt_in", + "output": "usage stats opt in config.toml: Whether to opt in to usage statistics and bug reporting" + }, + { + "prompt_type": "plain", + "instruction": ": What does core_site_xml_path do? : core site xml path config.toml: Configurations for a HDFS data source Path of hdfs coresite.xml core_site_xml_path is deprecated, please use hdfs_config_path" + }, + { + "prompt_type": "plain", + "instruction": ": Explain core_site_xml_path. 
: core site xml path config.toml: Configurations for a HDFS data source Path of hdfs coresite.xml core_site_xml_path is deprecated, please use hdfs_config_path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "core_site_xml_path", + "output": "core site xml path config.toml: Configurations for a HDFS data source Path of hdfs coresite.xml core_site_xml_path is deprecated, please use hdfs_config_path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "core_site_xml_path", + "output": "core site xml path config.toml: Configurations for a HDFS data source Path of hdfs coresite.xml core_site_xml_path is deprecated, please use hdfs_config_path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "core site xml path", + "output": "core site xml path config.toml: Configurations for a HDFS data source Path of hdfs coresite.xml core_site_xml_path is deprecated, please use hdfs_config_path" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "core site xml path config.toml: Configurations for a HDFS data source Path of hdfs coresite.xml core_site_xml_path is deprecated, please use hdfs_config_path" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting core_site_xml_path", + "output": "core site xml path config.toml: Configurations for a HDFS data source Path of hdfs coresite.xml core_site_xml_path is deprecated, please use hdfs_config_path" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting core_site_xml_path", + "output": "core site xml path config.toml: Configurations for a HDFS data source Path of hdfs coresite.xml core_site_xml_path is deprecated, please 
use hdfs_config_path" + }, + { + "prompt_type": "plain", + "instruction": ": What does hdfs_config_path do? : hdfs config path config.toml: (Required) HDFS config folder path. Can contain multiple config files." + }, + { + "prompt_type": "plain", + "instruction": ": Explain hdfs_config_path. : hdfs config path config.toml: (Required) HDFS config folder path. Can contain multiple config files." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_config_path", + "output": "hdfs config path config.toml: (Required) HDFS config folder path. Can contain multiple config files." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_config_path", + "output": "hdfs config path config.toml: (Required) HDFS config folder path. Can contain multiple config files." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs config path", + "output": "hdfs config path config.toml: (Required) HDFS config folder path. Can contain multiple config files." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hdfs config path config.toml: (Required) HDFS config folder path. Can contain multiple config files." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hdfs_config_path", + "output": "hdfs config path config.toml: (Required) HDFS config folder path. Can contain multiple config files." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hdfs_config_path", + "output": "hdfs config path config.toml: (Required) HDFS config folder path. Can contain multiple config files." + }, + { + "prompt_type": "plain", + "instruction": ": What does key_tab_path do? 
: key tab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. key_tab_path is deprecated, please use hdfs_keytab_path " + }, + { + "prompt_type": "plain", + "instruction": ": Explain key_tab_path. : key tab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. key_tab_path is deprecated, please use hdfs_keytab_path " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "key_tab_path", + "output": "key tab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. key_tab_path is deprecated, please use hdfs_keytab_path " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "key_tab_path", + "output": "key tab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. key_tab_path is deprecated, please use hdfs_keytab_path " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "key tab path", + "output": "key tab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. key_tab_path is deprecated, please use hdfs_keytab_path " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "key tab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. key_tab_path is deprecated, please use hdfs_keytab_path " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting key_tab_path", + "output": "key tab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. 
key_tab_path is deprecated, please use hdfs_keytab_path " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting key_tab_path", + "output": "key tab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. key_tab_path is deprecated, please use hdfs_keytab_path " + }, + { + "prompt_type": "plain", + "instruction": ": What does hdfs_keytab_path do? : hdfs keytab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain hdfs_keytab_path. : hdfs keytab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_keytab_path", + "output": "hdfs keytab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs_keytab_path", + "output": "hdfs keytab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "hdfs keytab path", + "output": "hdfs keytab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "hdfs keytab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting hdfs_keytab_path", + "output": "hdfs keytab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting hdfs_keytab_path", + "output": "hdfs keytab path config.toml: Path of the principal key tab file. Required when hdfs_auth_type='principal'. " + }, + { + "prompt_type": "plain", + "instruction": ": What does preview_cache_upon_server_exit do? : preview cache upon server exit config.toml: Whether to delete preview cache on server exit" + }, + { + "prompt_type": "plain", + "instruction": ": Explain preview_cache_upon_server_exit. : preview cache upon server exit config.toml: Whether to delete preview cache on server exit" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "preview_cache_upon_server_exit", + "output": "preview cache upon server exit config.toml: Whether to delete preview cache on server exit" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "preview_cache_upon_server_exit", + "output": "preview cache upon server exit config.toml: Whether to delete preview cache on server exit" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "preview cache upon server exit", + "output": "preview cache upon server exit config.toml: Whether to delete preview cache on server exit" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "preview cache upon server exit config.toml: Whether to delete preview cache on server exit" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of 
the expert setting preview_cache_upon_server_exit", + "output": "preview cache upon server exit config.toml: Whether to delete preview cache on server exit" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting preview_cache_upon_server_exit", + "output": "preview cache upon server exit config.toml: Whether to delete preview cache on server exit" + }, + { + "prompt_type": "plain", + "instruction": ": What does all_tasks_visible_to_users do? : all tasks visible to users config.toml: When this setting is enabled, any user can see all tasks running in the system, including their owner and an identification key. If this setting is turned off, user can see only their own tasks." + }, + { + "prompt_type": "plain", + "instruction": ": Explain all_tasks_visible_to_users. : all tasks visible to users config.toml: When this setting is enabled, any user can see all tasks running in the system, including their owner and an identification key. If this setting is turned off, user can see only their own tasks." + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable users to see all tasks in task manager: . : Set the all tasks visible to users config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "all_tasks_visible_to_users", + "output": "all tasks visible to users config.toml: When this setting is enabled, any user can see all tasks running in the system, including their owner and an identification key. If this setting is turned off, user can see only their own tasks." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "all_tasks_visible_to_users", + "output": "all tasks visible to users config.toml: Enable users to see all tasks in task manager: When this setting is enabled, any user can see all tasks running in the system, including their owner and an identification key. If this setting is turned off, user can see only their own tasks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "all tasks visible to users", + "output": "all tasks visible to users config.toml: Enable users to see all tasks in task manager: When this setting is enabled, any user can see all tasks running in the system, including their owner and an identification key. If this setting is turned off, user can see only their own tasks." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable users to see all tasks in task manager: ", + "output": "all tasks visible to users config.toml: Enable users to see all tasks in task manager: When this setting is enabled, any user can see all tasks running in the system, including their owner and an identification key. If this setting is turned off, user can see only their own tasks." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting all_tasks_visible_to_users", + "output": "all tasks visible to users config.toml: When this setting is enabled, any user can see all tasks running in the system, including their owner and an identification key. If this setting is turned off, user can see only their own tasks." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting all_tasks_visible_to_users", + "output": "all tasks visible to users config.toml: Enable users to see all tasks in task manager: When this setting is enabled, any user can see all tasks running in the system, including their owner and an identification key. If this setting is turned off, user can see only their own tasks." + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_health_api do? : enable health api config.toml: When enabled, server exposes Health API at /apis/health/v1, which provides system overview and utilization statistics" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_health_api. : enable health api config.toml: When enabled, server exposes Health API at /apis/health/v1, which provides system overview and utilization statistics" + }, + { + "prompt_type": "plain", + "instruction": ": How can I do this: Enable Health API: . 
: Set the enable health api config.toml" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_health_api", + "output": "enable health api config.toml: When enabled, server exposes Health API at /apis/health/v1, which provides system overview and utilization statistics" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_health_api", + "output": "enable health api config.toml: Enable Health API: When enabled, server exposes Health API at /apis/health/v1, which provides system overview and utilization statistics" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable health api", + "output": "enable health api config.toml: Enable Health API: When enabled, server exposes Health API at /apis/health/v1, which provides system overview and utilization statistics" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "Enable Health API: ", + "output": "enable health api config.toml: Enable Health API: When enabled, server exposes Health API at /apis/health/v1, which provides system overview and utilization statistics" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_health_api", + "output": "enable health api config.toml: When enabled, server exposes Health API at /apis/health/v1, which provides system overview and utilization statistics" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_health_api", + "output": "enable health api config.toml: Enable Health API: When enabled, server exposes Health API at /apis/health/v1, which provides system overview and utilization statistics" + }, + { + "prompt_type": "plain", + "instruction": ": What 
does listeners_inherit_env_variables do? : listeners inherit env variables config.toml: When enabled, the notification scripts will inherit the parent's process (DriverlessAI) environment variables. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain listeners_inherit_env_variables. : listeners inherit env variables config.toml: When enabled, the notification scripts will inherit the parent's process (DriverlessAI) environment variables. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners_inherit_env_variables", + "output": "listeners inherit env variables config.toml: When enabled, the notification scripts will inherit the parent's process (DriverlessAI) environment variables. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners_inherit_env_variables", + "output": "listeners inherit env variables config.toml: When enabled, the notification scripts will inherit the parent's process (DriverlessAI) environment variables. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners inherit env variables", + "output": "listeners inherit env variables config.toml: When enabled, the notification scripts will inherit the parent's process (DriverlessAI) environment variables. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "listeners inherit env variables config.toml: When enabled, the notification scripts will inherit the parent's process (DriverlessAI) environment variables. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting listeners_inherit_env_variables", + "output": "listeners inherit env variables config.toml: When enabled, the notification scripts will inherit the parent's process (DriverlessAI) environment variables. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting listeners_inherit_env_variables", + "output": "listeners inherit env variables config.toml: When enabled, the notification scripts will inherit the parent's process (DriverlessAI) environment variables. " + }, + { + "prompt_type": "plain", + "instruction": ": What does listeners_experiment_start do? : listeners experiment start config.toml: Notification scripts - the variable points to a location of script which is executed at given event in experiment lifecycle - the script should have executable flag enabled - use of absolute path is suggested The on experiment start notification script location" + }, + { + "prompt_type": "plain", + "instruction": ": Explain listeners_experiment_start. 
: listeners experiment start config.toml: Notification scripts - the variable points to a location of script which is executed at given event in experiment lifecycle - the script should have executable flag enabled - use of absolute path is suggested The on experiment start notification script location" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners_experiment_start", + "output": "listeners experiment start config.toml: Notification scripts - the variable points to a location of script which is executed at given event in experiment lifecycle - the script should have executable flag enabled - use of absolute path is suggested The on experiment start notification script location" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners_experiment_start", + "output": "listeners experiment start config.toml: Notification scripts - the variable points to a location of script which is executed at given event in experiment lifecycle - the script should have executable flag enabled - use of absolute path is suggested The on experiment start notification script location" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners experiment start", + "output": "listeners experiment start config.toml: Notification scripts - the variable points to a location of script which is executed at given event in experiment lifecycle - the script should have executable flag enabled - use of absolute path is suggested The on experiment start notification script location" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "listeners experiment start config.toml: Notification scripts - the variable points to a location of script which is executed at given event 
in experiment lifecycle - the script should have executable flag enabled - use of absolute path is suggested The on experiment start notification script location" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting listeners_experiment_start", + "output": "listeners experiment start config.toml: Notification scripts - the variable points to a location of script which is executed at given event in experiment lifecycle - the script should have executable flag enabled - use of absolute path is suggested The on experiment start notification script location" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting listeners_experiment_start", + "output": "listeners experiment start config.toml: Notification scripts - the variable points to a location of script which is executed at given event in experiment lifecycle - the script should have executable flag enabled - use of absolute path is suggested The on experiment start notification script location" + }, + { + "prompt_type": "plain", + "instruction": ": What does listeners_experiment_done do? : listeners experiment done config.toml: The on experiment finished notification script location" + }, + { + "prompt_type": "plain", + "instruction": ": Explain listeners_experiment_done. 
: listeners experiment done config.toml: The on experiment finished notification script location" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners_experiment_done", + "output": "listeners experiment done config.toml: The on experiment finished notification script location" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners_experiment_done", + "output": "listeners experiment done config.toml: The on experiment finished notification script location" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners experiment done", + "output": "listeners experiment done config.toml: The on experiment finished notification script location" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "listeners experiment done config.toml: The on experiment finished notification script location" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting listeners_experiment_done", + "output": "listeners experiment done config.toml: The on experiment finished notification script location" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting listeners_experiment_done", + "output": "listeners experiment done config.toml: The on experiment finished notification script location" + }, + { + "prompt_type": "plain", + "instruction": ": What does listeners_mojo_done do? : listeners mojo done config.toml: Notification script triggered when building of MOJO pipeline for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain listeners_mojo_done. 
: listeners mojo done config.toml: Notification script triggered when building of MOJO pipeline for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners_mojo_done", + "output": "listeners mojo done config.toml: Notification script triggered when building of MOJO pipeline for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners_mojo_done", + "output": "listeners mojo done config.toml: Notification script triggered when building of MOJO pipeline for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners mojo done", + "output": "listeners mojo done config.toml: Notification script triggered when building of MOJO pipeline for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "listeners mojo done config.toml: Notification script triggered when building of MOJO pipeline for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting listeners_mojo_done", + "output": "listeners mojo done config.toml: Notification script triggered when building of MOJO pipeline for experiment is finished. The value should be an absolute path to executable script. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting listeners_mojo_done", + "output": "listeners mojo done config.toml: Notification script triggered when building of MOJO pipeline for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "plain", + "instruction": ": What does listeners_autodoc_done do? : listeners autodoc done config.toml: Notification script triggered when rendering of AutoDoc for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain listeners_autodoc_done. : listeners autodoc done config.toml: Notification script triggered when rendering of AutoDoc for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners_autodoc_done", + "output": "listeners autodoc done config.toml: Notification script triggered when rendering of AutoDoc for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners_autodoc_done", + "output": "listeners autodoc done config.toml: Notification script triggered when rendering of AutoDoc for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners autodoc done", + "output": "listeners autodoc done config.toml: Notification script triggered when rendering of AutoDoc for experiment is finished. The value should be an absolute path to executable script. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "listeners autodoc done config.toml: Notification script triggered when rendering of AutoDoc for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting listeners_autodoc_done", + "output": "listeners autodoc done config.toml: Notification script triggered when rendering of AutoDoc for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting listeners_autodoc_done", + "output": "listeners autodoc done config.toml: Notification script triggered when rendering of AutoDoc for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "plain", + "instruction": ": What does listeners_scoring_pipeline_done do? : listeners scoring pipeline done config.toml: Notification script triggered when building of python scoring pipeline for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain listeners_scoring_pipeline_done. : listeners scoring pipeline done config.toml: Notification script triggered when building of python scoring pipeline for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners_scoring_pipeline_done", + "output": "listeners scoring pipeline done config.toml: Notification script triggered when building of python scoring pipeline for experiment is finished. The value should be an absolute path to executable script. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners_scoring_pipeline_done", + "output": "listeners scoring pipeline done config.toml: Notification script triggered when building of python scoring pipeline for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners scoring pipeline done", + "output": "listeners scoring pipeline done config.toml: Notification script triggered when building of python scoring pipeline for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "listeners scoring pipeline done config.toml: Notification script triggered when building of python scoring pipeline for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting listeners_scoring_pipeline_done", + "output": "listeners scoring pipeline done config.toml: Notification script triggered when building of python scoring pipeline for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting listeners_scoring_pipeline_done", + "output": "listeners scoring pipeline done config.toml: Notification script triggered when building of python scoring pipeline for experiment is finished. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "plain", + "instruction": ": What does listeners_experiment_artifacts_done do? 
: listeners experiment artifacts done config.toml: Notification script triggered when experiment and all its artifacts selected at the beginning of experiment are finished building. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain listeners_experiment_artifacts_done. : listeners experiment artifacts done config.toml: Notification script triggered when experiment and all its artifacts selected at the beginning of experiment are finished building. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners_experiment_artifacts_done", + "output": "listeners experiment artifacts done config.toml: Notification script triggered when experiment and all its artifacts selected at the beginning of experiment are finished building. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners_experiment_artifacts_done", + "output": "listeners experiment artifacts done config.toml: Notification script triggered when experiment and all its artifacts selected at the beginning of experiment are finished building. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "listeners experiment artifacts done", + "output": "listeners experiment artifacts done config.toml: Notification script triggered when experiment and all its artifacts selected at the beginning of experiment are finished building. The value should be an absolute path to executable script. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "listeners experiment artifacts done config.toml: Notification script triggered when experiment and all its artifacts selected at the beginning of experiment are finished building. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting listeners_experiment_artifacts_done", + "output": "listeners experiment artifacts done config.toml: Notification script triggered when experiment and all its artifacts selected at the beginning of experiment are finished building. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting listeners_experiment_artifacts_done", + "output": "listeners experiment artifacts done config.toml: Notification script triggered when experiment and all its artifacts selected at the beginning of experiment are finished building. The value should be an absolute path to executable script. " + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_quick_benchmark do? : enable quick benchmark config.toml: Whether to run quick performance benchmark at start of application" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_quick_benchmark. 
: enable quick benchmark config.toml: Whether to run quick performance benchmark at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_quick_benchmark", + "output": "enable quick benchmark config.toml: Whether to run quick performance benchmark at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_quick_benchmark", + "output": "enable quick benchmark config.toml: Whether to run quick performance benchmark at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable quick benchmark", + "output": "enable quick benchmark config.toml: Whether to run quick performance benchmark at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable quick benchmark config.toml: Whether to run quick performance benchmark at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_quick_benchmark", + "output": "enable quick benchmark config.toml: Whether to run quick performance benchmark at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_quick_benchmark", + "output": "enable quick benchmark config.toml: Whether to run quick performance benchmark at start of application" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_extended_benchmark do? : enable extended benchmark config.toml: Whether to run extended performance benchmark at start of application" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_extended_benchmark. 
: enable extended benchmark config.toml: Whether to run extended performance benchmark at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_extended_benchmark", + "output": "enable extended benchmark config.toml: Whether to run extended performance benchmark at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_extended_benchmark", + "output": "enable extended benchmark config.toml: Whether to run extended performance benchmark at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable extended benchmark", + "output": "enable extended benchmark config.toml: Whether to run extended performance benchmark at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable extended benchmark config.toml: Whether to run extended performance benchmark at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_extended_benchmark", + "output": "enable extended benchmark config.toml: Whether to run extended performance benchmark at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_extended_benchmark", + "output": "enable extended benchmark config.toml: Whether to run extended performance benchmark at start of application" + }, + { + "prompt_type": "plain", + "instruction": ": What does extended_benchmark_scale_num_rows do? : extended benchmark scale num rows config.toml: Scaling factor for number of rows for extended performance benchmark. 
For rigorous performance benchmarking, values of 1 or larger are recommended." + }, + { + "prompt_type": "plain", + "instruction": ": Explain extended_benchmark_scale_num_rows. : extended benchmark scale num rows config.toml: Scaling factor for number of rows for extended performance benchmark. For rigorous performance benchmarking, values of 1 or larger are recommended." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "extended_benchmark_scale_num_rows", + "output": "extended benchmark scale num rows config.toml: Scaling factor for number of rows for extended performance benchmark. For rigorous performance benchmarking, values of 1 or larger are recommended." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "extended_benchmark_scale_num_rows", + "output": "extended benchmark scale num rows config.toml: Scaling factor for number of rows for extended performance benchmark. For rigorous performance benchmarking, values of 1 or larger are recommended." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "extended benchmark scale num rows", + "output": "extended benchmark scale num rows config.toml: Scaling factor for number of rows for extended performance benchmark. For rigorous performance benchmarking, values of 1 or larger are recommended." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "extended benchmark scale num rows config.toml: Scaling factor for number of rows for extended performance benchmark. For rigorous performance benchmarking, values of 1 or larger are recommended." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting extended_benchmark_scale_num_rows", + "output": "extended benchmark scale num rows config.toml: Scaling factor for number of rows for extended performance benchmark. For rigorous performance benchmarking, values of 1 or larger are recommended." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting extended_benchmark_scale_num_rows", + "output": "extended benchmark scale num rows config.toml: Scaling factor for number of rows for extended performance benchmark. For rigorous performance benchmarking, values of 1 or larger are recommended." + }, + { + "prompt_type": "plain", + "instruction": ": What does extended_benchmark_num_cols do? : extended benchmark num cols config.toml: Number of columns for extended performance benchmark." + }, + { + "prompt_type": "plain", + "instruction": ": Explain extended_benchmark_num_cols. : extended benchmark num cols config.toml: Number of columns for extended performance benchmark." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "extended_benchmark_num_cols", + "output": "extended benchmark num cols config.toml: Number of columns for extended performance benchmark." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "extended_benchmark_num_cols", + "output": "extended benchmark num cols config.toml: Number of columns for extended performance benchmark." + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "extended benchmark num cols", + "output": "extended benchmark num cols config.toml: Number of columns for extended performance benchmark." 
+ }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "extended benchmark num cols config.toml: Number of columns for extended performance benchmark." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting extended_benchmark_num_cols", + "output": "extended benchmark num cols config.toml: Number of columns for extended performance benchmark." + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting extended_benchmark_num_cols", + "output": "extended benchmark num cols config.toml: Number of columns for extended performance benchmark." + }, + { + "prompt_type": "plain", + "instruction": ": What does benchmark_memory_timeout do? : benchmark memory timeout config.toml: Seconds to allow for testing memory bandwidth by generating numpy frames" + }, + { + "prompt_type": "plain", + "instruction": ": Explain benchmark_memory_timeout. 
: benchmark memory timeout config.toml: Seconds to allow for testing memory bandwidth by generating numpy frames" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark_memory_timeout", + "output": "benchmark memory timeout config.toml: Seconds to allow for testing memory bandwidth by generating numpy frames" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark_memory_timeout", + "output": "benchmark memory timeout config.toml: Seconds to allow for testing memory bandwidth by generating numpy frames" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark memory timeout", + "output": "benchmark memory timeout config.toml: Seconds to allow for testing memory bandwidth by generating numpy frames" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "benchmark memory timeout config.toml: Seconds to allow for testing memory bandwidth by generating numpy frames" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting benchmark_memory_timeout", + "output": "benchmark memory timeout config.toml: Seconds to allow for testing memory bandwidth by generating numpy frames" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting benchmark_memory_timeout", + "output": "benchmark memory timeout config.toml: Seconds to allow for testing memory bandwidth by generating numpy frames" + }, + { + "prompt_type": "plain", + "instruction": ": What does benchmark_memory_vm_fraction do? 
: benchmark memory vm fraction config.toml: Maximum portion of vm total to use for numpy memory benchmark" + }, + { + "prompt_type": "plain", + "instruction": ": Explain benchmark_memory_vm_fraction. : benchmark memory vm fraction config.toml: Maximum portion of vm total to use for numpy memory benchmark" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark_memory_vm_fraction", + "output": "benchmark memory vm fraction config.toml: Maximum portion of vm total to use for numpy memory benchmark" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark_memory_vm_fraction", + "output": "benchmark memory vm fraction config.toml: Maximum portion of vm total to use for numpy memory benchmark" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark memory vm fraction", + "output": "benchmark memory vm fraction config.toml: Maximum portion of vm total to use for numpy memory benchmark" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "benchmark memory vm fraction config.toml: Maximum portion of vm total to use for numpy memory benchmark" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting benchmark_memory_vm_fraction", + "output": "benchmark memory vm fraction config.toml: Maximum portion of vm total to use for numpy memory benchmark" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting benchmark_memory_vm_fraction", + "output": "benchmark memory vm fraction config.toml: Maximum portion of vm total to use for numpy memory benchmark" + }, + { + "prompt_type": "plain", + "instruction": ": What does benchmark_memory_max_cols do? 
: benchmark memory max cols config.toml: Maximum number of columns to use for numpy memory benchmark" + }, + { + "prompt_type": "plain", + "instruction": ": Explain benchmark_memory_max_cols. : benchmark memory max cols config.toml: Maximum number of columns to use for numpy memory benchmark" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark_memory_max_cols", + "output": "benchmark memory max cols config.toml: Maximum number of columns to use for numpy memory benchmark" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark_memory_max_cols", + "output": "benchmark memory max cols config.toml: Maximum number of columns to use for numpy memory benchmark" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "benchmark memory max cols", + "output": "benchmark memory max cols config.toml: Maximum number of columns to use for numpy memory benchmark" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "benchmark memory max cols config.toml: Maximum number of columns to use for numpy memory benchmark" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting benchmark_memory_max_cols", + "output": "benchmark memory max cols config.toml: Maximum number of columns to use for numpy memory benchmark" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting benchmark_memory_max_cols", + "output": "benchmark memory max cols config.toml: Maximum number of columns to use for numpy memory benchmark" + }, + { + "prompt_type": "plain", + "instruction": ": What does enable_startup_checks do? 
: enable startup checks config.toml: Whether to run quick startup checks at start of application" + }, + { + "prompt_type": "plain", + "instruction": ": Explain enable_startup_checks. : enable startup checks config.toml: Whether to run quick startup checks at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_startup_checks", + "output": "enable startup checks config.toml: Whether to run quick startup checks at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable_startup_checks", + "output": "enable startup checks config.toml: Whether to run quick startup checks at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "enable startup checks", + "output": "enable startup checks config.toml: Whether to run quick startup checks at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "enable startup checks config.toml: Whether to run quick startup checks at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting enable_startup_checks", + "output": "enable startup checks config.toml: Whether to run quick startup checks at start of application" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting enable_startup_checks", + "output": "enable startup checks config.toml: Whether to run quick startup checks at start of application" + }, + { + "prompt_type": "plain", + "instruction": ": What does application_id do? 
: application id config.toml: Application ID override, which should uniquely identify the instance" + }, + { + "prompt_type": "plain", + "instruction": ": Explain application_id. : application id config.toml: Application ID override, which should uniquely identify the instance" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "application_id", + "output": "application id config.toml: Application ID override, which should uniquely identify the instance" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "application_id", + "output": "application id config.toml: Application ID override, which should uniquely identify the instance" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "application id", + "output": "application id config.toml: Application ID override, which should uniquely identify the instance" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "application id config.toml: Application ID override, which should uniquely identify the instance" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting application_id", + "output": "application id config.toml: Application ID override, which should uniquely identify the instance" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting application_id", + "output": "application id config.toml: Application ID override, which should uniquely identify the instance" + }, + { + "prompt_type": "plain", + "instruction": ": What does db_backend do? : db backend config.toml: Specifies the DB backend which application uses. 
Possible options are: - *legacy* - Uses legacy SQLite with entity JSON blobs - *sqlite* - Uses relational SQLite separate entity tables" + }, + { + "prompt_type": "plain", + "instruction": ": Explain db_backend. : db backend config.toml: Specifies the DB backend which application uses. Possible options are: - *legacy* - Uses legacy SQLite with entity JSON blobs - *sqlite* - Uses relational SQLite separate entity tables" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "db_backend", + "output": "db backend config.toml: Specifies the DB backend which application uses. Possible options are: - *legacy* - Uses legacy SQLite with entity JSON blobs - *sqlite* - Uses relational SQLite separate entity tables" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "db_backend", + "output": "db backend config.toml: Specifies the DB backend which application uses. Possible options are: - *legacy* - Uses legacy SQLite with entity JSON blobs - *sqlite* - Uses relational SQLite separate entity tables" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "db backend", + "output": "db backend config.toml: Specifies the DB backend which application uses. Possible options are: - *legacy* - Uses legacy SQLite with entity JSON blobs - *sqlite* - Uses relational SQLite separate entity tables" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "db backend config.toml: Specifies the DB backend which application uses. 
Possible options are: - *legacy* - Uses legacy SQLite with entity JSON blobs - *sqlite* - Uses relational SQLite separate entity tables" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting db_backend", + "output": "db backend config.toml: Specifies the DB backend which application uses. Possible options are: - *legacy* - Uses legacy SQLite with entity JSON blobs - *sqlite* - Uses relational SQLite separate entity tables" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting db_backend", + "output": "db backend config.toml: Specifies the DB backend which application uses. Possible options are: - *legacy* - Uses legacy SQLite with entity JSON blobs - *sqlite* - Uses relational SQLite separate entity tables" + }, + { + "prompt_type": "plain", + "instruction": ": What does main_server_fork_timeout do? : main server fork timeout config.toml: After how many seconds to abort MLI recipe execution plan or recipe compatibility checks. Blocks main server from all activities, so long timeout is not desired, esp. in case of hanging processes, while a short timeout can too often lead to abortions on busy system. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain main_server_fork_timeout. : main server fork timeout config.toml: After how many seconds to abort MLI recipe execution plan or recipe compatibility checks. Blocks main server from all activities, so long timeout is not desired, esp. in case of hanging processes, while a short timeout can too often lead to abortions on busy system. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_fork_timeout", + "output": "main server fork timeout config.toml: After how many seconds to abort MLI recipe execution plan or recipe compatibility checks. Blocks main server from all activities, so long timeout is not desired, esp. 
in case of hanging processes, while a short timeout can too often lead to abortions on busy system. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main_server_fork_timeout", + "output": "main server fork timeout config.toml: After how many seconds to abort MLI recipe execution plan or recipe compatibility checks. Blocks main server from all activities, so long timeout is not desired, esp. in case of hanging processes, while a short timeout can too often lead to abortions on busy system. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "main server fork timeout", + "output": "main server fork timeout config.toml: After how many seconds to abort MLI recipe execution plan or recipe compatibility checks. Blocks main server from all activities, so long timeout is not desired, esp. in case of hanging processes, while a short timeout can too often lead to abortions on busy system. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "main server fork timeout config.toml: After how many seconds to abort MLI recipe execution plan or recipe compatibility checks. Blocks main server from all activities, so long timeout is not desired, esp. in case of hanging processes, while a short timeout can too often lead to abortions on busy system. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting main_server_fork_timeout", + "output": "main server fork timeout config.toml: After how many seconds to abort MLI recipe execution plan or recipe compatibility checks. Blocks main server from all activities, so long timeout is not desired, esp. in case of hanging processes, while a short timeout can too often lead to abortions on busy system. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting main_server_fork_timeout", + "output": "main server fork timeout config.toml: After how many seconds to abort MLI recipe execution plan or recipe compatibility checks. Blocks main server from all activities, so long timeout is not desired, esp. in case of hanging processes, while a short timeout can too often lead to abortions on busy system. " + }, + { + "prompt_type": "plain", + "instruction": ": What does audit_log_retention_period do? : audit log retention period config.toml: After how many days the audit log records are removed. Set equal to 0 to disable removal of old records. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain audit_log_retention_period. : audit log retention period config.toml: After how many days the audit log records are removed. Set equal to 0 to disable removal of old records. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "audit_log_retention_period", + "output": "audit log retention period config.toml: After how many days the audit log records are removed. Set equal to 0 to disable removal of old records. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "audit_log_retention_period", + "output": "audit log retention period config.toml: After how many days the audit log records are removed. Set equal to 0 to disable removal of old records. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "audit log retention period", + "output": "audit log retention period config.toml: After how many days the audit log records are removed. Set equal to 0 to disable removal of old records. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "audit log retention period config.toml: After how many days the audit log records are removed. Set equal to 0 to disable removal of old records. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting audit_log_retention_period", + "output": "audit log retention period config.toml: After how many days the audit log records are removed. Set equal to 0 to disable removal of old records. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting audit_log_retention_period", + "output": "audit log retention period config.toml: After how many days the audit log records are removed. Set equal to 0 to disable removal of old records. " + }, + { + "prompt_type": "plain", + "instruction": ": What does dataset_tmp_upload_file_retention_time_min do? : dataset tmp upload file retention time min config.toml: Time to wait after performing a cleanup of temporary files for in-browser dataset upload. " + }, + { + "prompt_type": "plain", + "instruction": ": Explain dataset_tmp_upload_file_retention_time_min. : dataset tmp upload file retention time min config.toml: Time to wait after performing a cleanup of temporary files for in-browser dataset upload. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dataset_tmp_upload_file_retention_time_min", + "output": "dataset tmp upload file retention time min config.toml: Time to wait after performing a cleanup of temporary files for in-browser dataset upload. 
" + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dataset_tmp_upload_file_retention_time_min", + "output": "dataset tmp upload file retention time min config.toml: Time to wait after performing a cleanup of temporary files for in-browser dataset upload. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "dataset tmp upload file retention time min", + "output": "dataset tmp upload file retention time min config.toml: Time to wait after performing a cleanup of temporary files for in-browser dataset upload. " + }, + { + "prompt_type": "human_bot", + "instruction": "Explain the following expert setting for Driverless AI", + "input": "", + "output": "dataset tmp upload file retention time min config.toml: Time to wait after performing a cleanup of temporary files for in-browser dataset upload. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a short explanation of the expert setting dataset_tmp_upload_file_retention_time_min", + "output": "dataset tmp upload file retention time min config.toml: Time to wait after performing a cleanup of temporary files for in-browser dataset upload. " + }, + { + "prompt_type": "human_bot", + "instruction": "Provide a detailed explanation of the expert setting dataset_tmp_upload_file_retention_time_min", + "output": "dataset tmp upload file retention time min config.toml: Time to wait after performing a cleanup of temporary files for in-browser dataset upload. " + } +] \ No newline at end of file