{
  "add_plugin": "Add Plugin",
  "back_to_chat_list": "Back to chat list",
  "chat_date": "{{val, datetime}}",
  "config_title": "Chat configuration",
  "delete_chat": "Delete chat",
  "delete_confirmation": "Are you sure you want to delete this chat?",
  "delete_confirmation_detail": "If you delete this chat, it won't be part of our data, and we won't be able to use it to improve our models. Please take the time to upvote and downvote responses in other chats to help us make Open Assistant better!",
  "draft": "Draft",
  "drafts_generating_notify": "Draft messages are still generating. Please wait.",
  "edit_plugin": "Edit Plugin",
  "empty": "Untitled",
  "input_placeholder": "Ask the assistant anything",
  "login_message": "To use this feature, you need to log in again. Log in using one of these providers:",
  "max_new_tokens": "Max new tokens",
  "model": "Model",
  "more_actions": "More Actions",
  "only_visible": "Only visible",
  "opt_out": {
    "button": "Opt out of training data",
    "success_message": "You have opted out of training data.",
    "dialog": {
      "title": "Are you sure you want to opt this chat out of the training data?",
      "description": "If you confirm, this chat won't be used for improving our model. Please take the time to upvote and downvote responses in other chats to help us make Open Assistant better!"
    }
  },
  "parameter_description": {
    "max_new_tokens": "Max new tokens: This parameter tells the model how many new tokens it should generate at most for the response.",
    "repetition_penalty": "Repetition Penalty: This parameter reduces the probability of repeating the same tokens again and again by making repeated tokens less likely than the model would ordinarily predict.",
    "temperature": "Temperature: Each token you generate is sampled from a distribution p(next_token|previous_tokens). The temperature parameter can \"sharpen\" or dampen this distribution. Setting it to 1 means that the model generates tokens based on their predicted probability (i.e., if the model predicts that \"XYZ\" has a probability of 12.3%, it will generate it with a 12.3% likelihood). Lowering the temperature towards zero makes the model more greedy, causing high probabilities to get even higher and low probabilities to get even lower (note that this is not a linear relationship!). Increasing the temperature makes all probabilities more similar. Intuitively, a low temperature means that the model generates responses that align closely with its beliefs, while a high temperature allows for more creative and diverse responses.",
    "top_k": "Top-k: This is similar to top-p sampling, but instead of taking the top tokens until their cumulative probability exceeds 'p', it only takes the K most probable tokens. Top-p is usually preferred since it allows the model to 'tune' the search radius, but top-k can be useful as an emergency brake when the model has no idea what to generate next and assigns a very uniform distribution to many tokens.",
    "top_p": "Top-p (also known as nucleus) sampling: This method reduces the probability distribution to only look at the top-p percent of tokens. By discarding low probability tokens, it helps to bound the generation and prevent the model from generating grammatically incorrect sentences.",
    "typical_p": "Typical p: Typical sampling is an information-theoretic technique that, in addition to the probability, also considers the sequence entropy (i.e., the information content according to the probability). This means that typical sampling \"overweights\" some of the tokens with lower probability because they are deemed \"interesting,\" and underweights high probability tokens because they are deemed \"boring.\""
  },
  "plugin_url_placeholder": "Enter plugin URL",
  "plugin_repositories": "Plugin Repositories",
  "plugins": "Plugins",
  "preset": "Preset",
  "preset_custom": "Custom",
  "queue_info": "Your message is queued, you are at position {{ queuePosition, number, integer }} in the queue.",
  "remove_plugin": "Remove Plugin",
  "repetition_penalty": "Repetition penalty",
  "select_chat_notify": "Please select a draft to continue.",
  "sponsored_by": "Sponsored By",
  "temperature": "Temperature",
  "top_k": "Top K",
  "top_p": "Top P",
  "typical_p": "Typical P",
  "unverified_plugin": "UNVERIFIED",
  "unverified_plugin_description": "This plugin has not been verified by the Open Assistant team. Use at your own risk.",
  "used": "Used",
  "verified_plugin": "VERIFIED",
  "verified_plugin_description": "This plugin has been verified by the Open Assistant team.",
  "view_plugin": "View Plugin",
  "visible_hidden": "Visible & hidden",
  "warning": "This Assistant is a demonstration without full internet access. It may generate incorrect or misleading information. It is not suitable for important use cases or giving advice.",
  "you_are_logged_in": "You are logged in to the chat service",
  "your_chats": "Your Chats",
  "save_preset": "Save this preset",
  "preset_exists_error": "A preset with this name already exists",
  "preset_name_placeholder": "Enter name",
  "feedback_message": "How did I do? Your feedback will make me better!",
  "feedback_action_great": "Good",
  "feedback_action_poor": "Could be better",
  "custom_instructions": "Custom instructions",
  "custom_instructions_user_profile": "What info should Open-Assistant have about you to make its replies even better?",
  "custom_instructions_response_instructions": "How do you want Open-Assistant to chat with you?",
  "custom_instructions_user_profile_placeholder": "List some of your aspirations.\nDescribe your hobbies and interests.\nShare your location.\nWhat is your occupation?\nWhich topics could you discuss extensively?",
  "custom_instructions_response_instructions_placeholder": "Should Open-Assistant express opinions or maintain neutrality?\nSpecify the desired formality level for Open-Assistant's responses.\nHow should Open-Assistant address you?\nDetermine the preferred length of responses."
}
