distilabel:
  version: 1.2.0
pipeline:
  name: generate_embedding_queries
  description: null
  steps:
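  # Step 1: load the ZenML docs chunks dataset from the Hugging Face Hub,
  # mapping the page_content column to anchor for the downstream task.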
  - step:
      name: zenml-docs-0-60-0
      input_mappings: {}
      output_mappings:
        page_content: anchor
      batch_size: 50
      repo_id: zenml/rag_qa_embedding_questions_0_60_0
      split: train
      config: null
      streaming: false
      num_examples: null
      storage_options: null
      runtime_parameters_info:
      - name: batch_size
        optional: true
        description: The number of rows that will contain the batches generated by
          the step.
      - name: repo_id
        optional: false
        description: The Hugging Face Hub repository ID of the dataset to load.
      - name: split
        optional: true
        description: The split of the dataset to load. Defaults to 'train'.
      - name: config
        optional: true
        description: The configuration of the dataset to load. This is optional and
          only needed if the dataset has multiple configurations.
      - name: streaming
        optional: true
        description: Whether to load the dataset in streaming mode or not. Defaults
          to False.
      - name: num_examples
        optional: true
        description: The number of examples to load from the dataset. By default will
          load all examples.
      type_info:
        module: distilabel.steps.generators.huggingface
        name: LoadDataFromHub
    name: zenml-docs-0-60-0
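  # Step 2: for each anchor chunk, GenerateSentencePair (triplet mode, action
  # "query") uses OpenAI's gpt-4o to produce a positive and a negative query.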
  - step:
      name: generate_sentence_pair_0
      input_mappings: {}
      output_mappings: {}
      input_batch_size: 10
      llm:
        generation_kwargs:
          temperature: 0.7
          max_new_tokens: 512
        model: gpt-4o
        base_url: https://api.openai.com/v1
        max_retries: 6
        timeout: 120
        structured_output: null
        type_info:
          module: distilabel.llms.openai
          name: OpenAILLM
      group_generations: false
      add_raw_output: true
      num_generations: 1
      triplet: true
      action: query
      context: The text is a chunk from ZenML's technical documentation. Along with
        prose explanations, the text chunk may include code snippets and logs but
        these are identifiable from the surrounding backticks.
      runtime_parameters_info:
      - name: input_batch_size
        optional: true
        description: The number of rows that will contain the batches processed by
          the step.
      - name: llm
        runtime_parameters_info:
        - name: generation_kwargs
          description: The kwargs to be propagated to either `generate` or `agenerate`
            methods within each `LLM`.
          keys:
          - name: max_new_tokens
            optional: true
          - name: frequency_penalty
            optional: true
          - name: presence_penalty
            optional: true
          - name: temperature
            optional: true
          - name: top_p
            optional: true
          - name: stop
            optional: true
          - name: response_format
            optional: true
        - name: base_url
          optional: true
          description: The base URL to use for the OpenAI API requests.
        - name: api_key
          optional: true
          description: The API key to authenticate the requests to the OpenAI API.
        - name: max_retries
          optional: true
          description: The maximum number of times to retry the request to the API
            before failing.
        - name: timeout
          optional: true
          description: The maximum time in seconds to wait for a response from the
            API.
        - name: structured_output
          optional: true
          description: The structured output format to use across all the generations.
      - name: add_raw_output
        optional: true
        description: Whether to include the raw output of the LLM in the key `raw_output_<TASK_NAME>`
          of the `distilabel_metadata` dictionary output column
      - name: num_generations
        optional: true
        description: The number of generations to be produced per input.
      type_info:
        module: distilabel.steps.tasks.sentence_transformers
        name: GenerateSentencePair
    name: generate_sentence_pair_0
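  # Pipeline topology: the loader step feeds the query-generation step, which is
  # the final (leaf) step.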
  connections:
  - from: zenml-docs-0-60-0
    to:
    - generate_sentence_pair_0
  - from: generate_sentence_pair_0
    to: []
  routing_batch_functions: []
  type_info:
    module: distilabel.pipeline.local
    name: Pipeline