# Prompts for NeMo LLM models.
prompts:
  # Prompt for the general conversation task (no canonical forms): renders the
  # general instructions, then the history formatted as a User/Assistant
  # sequence, and cues the model to continue as "Assistant:".
  - task: general
    models:
      - nemollm
    content: |-
      {{ general_instructions }}

      {{ history | user_assistant_sequence_nemollm }}
      Assistant:

  # Prompt for detecting the user message canonical form.
  # Renders intent examples plus the start of the sample conversation and the
  # current history; the model's completion is parsed with `verbose_v1`.
  - task: generate_user_intent
    models:
      - nemollm
    content: |-
      <extra_id_0>System
      {{ general_instructions }}
      Your task is to generate a short summary called user intent for a user message in a conversation.

      <extra_id_0>System
      # This is how the user talks, use these examples to generate the user intent:
      {{ examples | to_messages_nemollm }}

      <extra_id_0>System
      # This is the current conversation between the user and the bot:
      {{ sample_conversation | first_turns(2) | to_messages_nemollm }}
      {{ history | colang | to_messages_nemollm }}
      <extra_id_1>Assistant

    output_parser: "verbose_v1"

  # Prompt for generating the next steps.
  # Shows a full sample conversation, next-step examples, and the current
  # history; the completion is parsed with `verbose_v1`.
  - task: generate_next_steps
    models:
      - nemollm
    content: |-
      <extra_id_0>System
      {{ general_instructions }}

      <extra_id_0>System
      # This is how a conversation between a user and the bot can go:
      {{ sample_conversation | to_messages_nemollm }}

      <extra_id_0>System
      # This is how the bot thinks, use these examples to generate the bot canonical form:
      {{ examples | to_messages_nemollm }}

      <extra_id_0>System
      # This is the current conversation between the user and the bot:
      {{ sample_conversation | first_turns(2) | to_messages_nemollm }}
      {{ history | colang | to_messages_nemollm }}

    output_parser: "verbose_v1"

  # Prompt for generating the bot message from a canonical form.
  # The "additional context" section is only rendered when `relevant_chunks`
  # is non-empty (e.g. when retrieval supplied supporting passages).
  - task: generate_bot_message
    models:
      - nemollm
    content: |-
      <extra_id_0>System
      {{ general_instructions }}

      <extra_id_0>System
      # This is how a conversation between a user and the bot can go:
      {{ sample_conversation | to_messages_nemollm }}

      {% if relevant_chunks %}
      <extra_id_0>System
      # This is some additional context:
      ```markdown
      {{ relevant_chunks }}
      ```
      {% endif %}

      <extra_id_0>System
      # This is how the bot talks:
      {{ examples | to_messages_nemollm }}

      <extra_id_0>System
      # This is the current conversation between the user and the bot:
      {{ sample_conversation | first_turns(2) | to_messages_nemollm }}
      {{ history | colang | to_messages_nemollm }}

    output_parser: "verbose_v1"

  # Prompt for generating the value of a context variable.
  # Ends with "${{ var_name }} =" so the model completes the assignment for
  # the requested variable.
  # NOTE(review): the examples below are piped through `verbose_v1` while
  # every other task in this file uses `to_messages_nemollm` — confirm this
  # filter usage is intentional.
  - task: generate_value
    models:
      - nemollm
    content: |-
      <extra_id_0>System
      {{ general_instructions }}

      <extra_id_0>System
      # This is how a conversation between a user and the bot can go:
      {{ sample_conversation | to_messages_nemollm }}

      <extra_id_0>System
      # This is how the bot thinks:
      {{ examples | verbose_v1 }}

      <extra_id_0>System
      # This is the current conversation between the user and the bot:
      {{ sample_conversation | first_turns(2) | to_messages_nemollm }}
      {{ history | colang | to_messages_nemollm }}

      <extra_id_0>System
      # {{ instructions }}
      <extra_id_1>Assistant
      ${{ var_name }} =
    output_parser: "verbose_v1"

  # Prompt for fact-checking: asks for a yes/no entailment judgement of the
  # bot `response` (hypothesis) against the retrieved `evidence`.
  - task: fact_checking
    models:
      - nemollm
    content: |-
      You are given a task to identify if the hypothesis is grounded and entailed to the evidence.
      You will only use the contents of the evidence and not rely on external knowledge.
      Answer with yes/no. "evidence": {{ evidence }} "hypothesis": {{ response }} "entails":


  # Prompt for hallucination checking: asks for a yes/no judgement of whether
  # the `statement` (hypothesis) agrees with the given `paragraph` (context).
  - task: check_hallucination
    models:
      - nemollm
    content: |-
      You are given a task to identify if the hypothesis is in agreement with the context below.
      You will only use the contents of the context and not rely on external knowledge.
      Answer with yes/no. "context": {{ paragraph }} "hypothesis": {{ statement }} "agreement":


  # Prompts for compact mode for dialogue rails
  # Prompt for detecting the user message canonical form in compact form.
  # Compact variant (`mode: compact`): shorter context — only the first two
  # sample turns, the last history turn, and a fixed list of candidate
  # intents via `potential_user_intents`.
  - task: generate_user_intent
    models:
      - nemollm
    mode: compact
    content: |-
      <extra_id_0>System
      Task: generate the user intent for the last user message.
      This is a sample conversation between the user and the bot:
      {{ sample_conversation | first_turns(2) | to_messages_nemollm }}
      <extra_id_0>System
      Use these examples:
      {{ examples | to_messages_nemollm }}
      <extra_id_0>System
      Choose intent from this list: {{ potential_user_intents }}
      {{ history | colang | last_turns(1) | to_messages_nemollm }}<extra_id_1>Assistant

    output_parser: "verbose_v1"

  # Prompt for generating the next steps in compact form.
  # Compact variant (`mode: compact`): includes only the last history turn
  # instead of the full colang history.
  - task: generate_next_steps
    models:
      - nemollm
    mode: compact
    content: |-
      <extra_id_0>System
      Task: generate the next steps in a conversation.
      Use these examples:
      {{ examples | to_messages_nemollm }}
      <extra_id_0>System
      This is the current conversation between the user and the bot:
      {{ sample_conversation | first_turns(2) | to_messages_nemollm }}
      {{ history | colang | last_turns(1) | to_messages_nemollm }}

    output_parser: "verbose_v1"

  # Prompt for generating the bot message from a canonical form in compact form.
  # Compact variant (`mode: compact`): keeps the last three history turns and
  # only renders the additional-context section when `relevant_chunks` is set.
  - task: generate_bot_message
    models:
      - nemollm
    mode: compact
    content: |-
      <extra_id_0>System
      Task: generate the bot message in a conversation.
      {% if relevant_chunks %}
      # This is some additional context:
      ```markdown
      {{ relevant_chunks }}
      ```
      {% endif %}
      <extra_id_0>System
      Use these examples and continue the conversation:
      {{ examples | to_messages_nemollm }}
      <extra_id_0>System
      This is the current conversation between the user and the bot:
      {{ sample_conversation | first_turns(2) | to_messages_nemollm }}
      {{ history | colang | last_turns(3) | to_messages_nemollm }}

    output_parser: "verbose_v1"
