---
# LiteLLM proxy configuration.
# model_list: models exposed by the proxy; each entry maps a public
# model_name to the litellm_params used to route the call.
model_list:
  - model_name: "openai-model"
    litellm_params:
      model: "gpt-3.5-turbo"

# Global LiteLLM settings: enable response caching for embedding calls
# (sync and async), backed by a cache server on localhost.
litellm_settings:
  cache: true  # canonical lowercase boolean (was `True`)
  cache_params:
    supported_call_types: ["embedding", "aembedding"]
    host: "localhost"