# Configuration version (required)
# Quoted so the value is unambiguously a string for all parsers/tooling.
version: "1.0.3"

# Cache settings: Set to true to enable caching
cache: true

# Definition of custom endpoints
endpoints:
  # assistants:
  #   disableBuilder: false # Disable Assistants Builder Interface by setting to `true`
  #   pollIntervalMs: 750 # Polling interval for checking assistant updates
  #   timeoutMs: 180000 # Timeout for assistant operations
  #   # Should only be one or the other, either `supportedIds` or `excludedIds`
  #   supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
  #   # excludedIds: ["asst_excludedAssistantId"]
  custom:
    # Groq (OpenAI-compatible API)
    - name: "groq"
      apiKey: "${GROQ_API_KEY}"
      baseURL: "https://api.groq.com/openai/v1/"
      models:
        default:
          - "llama2-70b-4096"
          - "mixtral-8x7b-32768"
          - "gemma-7b-it"
        fetch: false
      titleConvo: true
      titleModel: "mixtral-8x7b-32768"
      summarize: false
      summaryModel: "mixtral-8x7b-32768"
      forcePrompt: false
      modelDisplayLabel: "groq"

    # Mistral AI API
    - name: "Mistral"
      apiKey: "${MISTRAL_API_KEY}"
      baseURL: "https://api.mistral.ai/v1"
      models:
        default:
          - "mistral-tiny"
          - "mistral-small"
          - "mistral-medium"
          - "mistral-large-latest"
        fetch: false
      titleConvo: true
      titleMethod: "completion"
      titleModel: "mistral-tiny"
      summarize: false
      summaryModel: "mistral-tiny"
      forcePrompt: false
      modelDisplayLabel: "Mistral"
      # Strip OpenAI-style params the Mistral API rejects before forwarding requests
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]

    # Perplexity
    - name: "Perplexity"
      apiKey: "user_provided"
      baseURL: "https://api.perplexity.ai/"
      models:
        default:
          - "mistral-7b-instruct"
          - "sonar-small-chat"
          - "sonar-small-online"
          - "sonar-medium-chat"
          - "sonar-medium-online"
        fetch: false # fetching list of models is not supported
      titleConvo: true
      titleModel: "sonar-medium-chat"
      summarize: false
      summaryModel: "sonar-medium-chat"
      forcePrompt: false
      # Strip params the Perplexity API rejects before forwarding requests
      dropParams: ["stop", "frequency_penalty"]
      modelDisplayLabel: "Perplexity"