distilabel:
  version: 1.2.0
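# Serialized distilabel pipeline (generate_embedding_queries): it loads chunks of
# ZenML documentation from the Hugging Face Hub and uses GPT-4o to generate
# synthetic queries for each chunk, producing data for embedding-model fine-tuning.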
pipeline:
  name: generate_embedding_queries
  description: null
  steps:
  - step:
      name: zenml-docs-0-60-0
      input_mappings: {}
      output_mappings:
        page_content: anchor
      batch_size: 50
      repo_id: zenml/rag_qa_embedding_questions_0_60_0
      split: train
      config: null
      streaming: false
      num_examples: null
      storage_options: null
      runtime_parameters_info:
      - name: batch_size
        optional: true
        description: The number of rows that each batch generated by the step will
          contain.
      - name: repo_id
        optional: false
        description: The Hugging Face Hub repository ID of the dataset to load.
      - name: split
        optional: true
        description: The split of the dataset to load. Defaults to 'train'.
      - name: config
        optional: true
        description: The configuration of the dataset to load. This is optional and
          only needed if the dataset has multiple configurations.
      - name: streaming
        optional: true
        description: Whether to load the dataset in streaming mode or not. Defaults
          to False.
      - name: num_examples
        optional: true
        description: The number of examples to load from the dataset. By default, all
          examples will be loaded.
      type_info:
        module: distilabel.steps.generators.huggingface
        name: LoadDataFromHub
    name: zenml-docs-0-60-0
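  # The step below consumes the `anchor` chunks produced above. With `triplet: true`
  # and `action: query`, GenerateSentencePair is expected to add a `positive` and a
  # `negative` query column for every anchor. Note that the OpenAI API key is not
  # serialized here; it is provided at run time via the `api_key` runtime parameter
  # or the OPENAI_API_KEY environment variable.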
  - step:
      name: generate_sentence_pair_0
      input_mappings: {}
      output_mappings: {}
      input_batch_size: 10
      llm:
        generation_kwargs:
          temperature: 0.7
          max_new_tokens: 512
        model: gpt-4o
        base_url: https://api.openai.com/v1
        max_retries: 6
        timeout: 120
        structured_output: null
        type_info:
          module: distilabel.llms.openai
          name: OpenAILLM
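      # The generation_kwargs above (temperature, max_new_tokens) apply to every
      # OpenAI call made by this task; like the other values listed under
      # runtime_parameters_info below, they can be overridden when the pipeline runs.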
      group_generations: false
      add_raw_output: true
      num_generations: 1
      triplet: true
      action: query
      context: The text is a chunk from ZenML's technical documentation. Along with
        prose explanations, the text chunk may include code snippets and logs, but
        these are identifiable from the surrounding backticks.
      runtime_parameters_info:
      - name: input_batch_size
        optional: true
        description: The number of rows that each batch processed by the step will
          contain.
      - name: llm
        runtime_parameters_info:
        - name: generation_kwargs
          description: The kwargs to be propagated to either the `generate` or `agenerate`
            method of each `LLM`.
          keys:
          - name: max_new_tokens
            optional: true
          - name: frequency_penalty
            optional: true
          - name: presence_penalty
            optional: true
          - name: temperature
            optional: true
          - name: top_p
            optional: true
          - name: stop
            optional: true
          - name: response_format
            optional: true
        - name: base_url
          optional: true
          description: The base URL to use for the OpenAI API requests.
        - name: api_key
          optional: true
          description: The API key to authenticate the requests to the OpenAI API.
        - name: max_retries
          optional: true
          description: The maximum number of times to retry the request to the API
            before failing.
        - name: timeout
          optional: true
          description: The maximum time in seconds to wait for a response from the
            API.
        - name: structured_output
          optional: true
          description: The structured output format to use across all the generations.
      - name: add_raw_output
        optional: true
        description: Whether to include the raw output of the LLM in the key `raw_output_<TASK_NAME>`
          of the `distilabel_metadata` dictionary output column.
      - name: num_generations
        optional: true
        description: The number of generations to be produced per input.
      type_info:
        module: distilabel.steps.tasks.sentence_transformers
        name: GenerateSentencePair
    name: generate_sentence_pair_0
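  # Data flow: the Hub loader feeds generate_sentence_pair_0, which has no
  # downstream steps, so its output forms the pipeline's resulting dataset.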
  connections:
  - from: zenml-docs-0-60-0
    to:
    - generate_sentence_pair_0
  - from: generate_sentence_pair_0
    to: []
  routing_batch_functions: []
  type_info:
    module: distilabel.pipeline.local
    name: Pipeline
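# A rough Python sketch of how a pipeline like this could be defined with the
# distilabel API. This is an illustrative reconstruction from the serialized fields
# above, not necessarily the exact script that produced this file:
#
#   from distilabel.llms.openai import OpenAILLM
#   from distilabel.pipeline.local import Pipeline
#   from distilabel.steps.generators.huggingface import LoadDataFromHub
#   from distilabel.steps.tasks.sentence_transformers import GenerateSentencePair
#
#   with Pipeline(name="generate_embedding_queries") as pipeline:
#       # Load the ZenML docs chunks and expose `page_content` as `anchor`.
#       load_docs = LoadDataFromHub(
#           name="zenml-docs-0-60-0",
#           repo_id="zenml/rag_qa_embedding_questions_0_60_0",
#           split="train",
#           batch_size=50,
#           output_mappings={"page_content": "anchor"},
#       )
#       # Generate a positive and a negative query for every anchor chunk.
#       generate_queries = GenerateSentencePair(
#           name="generate_sentence_pair_0",
#           triplet=True,
#           action="query",
#           context=(
#               "The text is a chunk from ZenML's technical documentation. "
#               "Along with prose explanations, the text chunk may include code "
#               "snippets and logs, but these are identifiable from the "
#               "surrounding backticks."
#           ),
#           llm=OpenAILLM(model="gpt-4o"),
#           input_batch_size=10,
#       )
#       load_docs.connect(generate_queries)
#
#   if __name__ == "__main__":
#       # Generation kwargs are runtime parameters, so they can be supplied at run time.
#       distiset = pipeline.run(
#           parameters={
#               "generate_sentence_pair_0": {
#                   "llm": {
#                       "generation_kwargs": {
#                           "temperature": 0.7,
#                           "max_new_tokens": 512,
#                       },
#                   },
#               },
#           },
#       )
#
# If the distilabel CLI is installed, a serialized file like this one can usually be
# re-run directly as well, e.g. `distilabel pipeline run --config pipeline.yaml`
# (CLI invocation assumed; check it against the installed distilabel version).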