# Configuration version (required)
version: "1.1.5"

# Cache settings: Set to true to enable caching
cache: true

registration:
  allowedDomains:
    - "gmail.com"

# Definition of custom endpoints
endpoints:
  # assistants:
  #   disableBuilder: false # Disable Assistants Builder Interface by setting to `true`
  #   pollIntervalMs: 750 # Polling interval for checking assistant updates
  #   timeoutMs: 180000 # Timeout for assistant operations
  #   # Should only be one or the other, either `supportedIds` or `excludedIds`
  #   supportedIds: ["asst_supportedAssistantId1", "asst_supportedAssistantId2"]
  #   # excludedIds: ["asst_excludedAssistantId"]
  custom:
    # groq
    - name: "groq"
      apiKey: "${GROQ_API_KEY}"
      baseURL: "https://api.groq.com/openai/v1/"
      models:
        default: ["gemma-7b-it"]
        fetch: true
      titleConvo: true
      titleModel: "mixtral-8x7b-32768"
      summarize: false
      summaryModel: "mixtral-8x7b-32768"
      forcePrompt: false
      modelDisplayLabel: "groq"

    # Mistral AI API
    - name: "Mistral"
      apiKey: "${MISTRAL_API_KEY}"
      baseURL: "https://api.mistral.ai/v1"
      models:
        default:
          - "mistral-small-latest"
          - "mistral-medium-latest"
          - "mistral-large-latest"
        fetch: false
      titleConvo: true
      titleMethod: "completion"
      titleModel: "open-mistral-7b"
      summarize: false
      summaryModel: "open-mistral-7b"
      forcePrompt: false
      modelDisplayLabel: "Mistral"
      dropParams: ["stop", "user", "frequency_penalty", "presence_penalty"]

    # Perplexity
    - name: "Perplexity"
      apiKey: "user_provided"
      baseURL: "https://api.perplexity.ai/"
      models:
        default:
          - "mistral-7b-instruct"
          - "sonar-small-chat"
          - "sonar-small-online"
          - "sonar-medium-chat"
          - "sonar-medium-online"
        fetch: false # fetching list of models is not supported
      titleConvo: true
      titleModel: "sonar-medium-chat"
      summarize: false
      summaryModel: "sonar-medium-chat"
      forcePrompt: false
      dropParams: ["stop", "frequency_penalty"]
      modelDisplayLabel: "Perplexity"

    # OpenRouter
    - name: "OpenRouter"
      # Known issue: you should not use `OPENROUTER_API_KEY` as it will then
      # override the `openAI` endpoint to use OpenRouter as well.
      apiKey: "${OPENROUTER_KEY}"
      baseURL: "https://openrouter.ai/api/v1"
      models:
        default: ["meta-llama/llama-3-8b-instruct"]
        fetch: true
      titleConvo: true
      titleModel: "meta-llama/llama-3-8b-instruct"
      # Recommended: Drop the stop parameter from the request as OpenRouter
      # models use a variety of stop tokens.
      dropParams: ["stop"]
      modelDisplayLabel: "OpenRouter"