"""
Translate from OpenAI's `/v1/chat/completions` to Perplexity's `/v1/chat/completions`
"""
from typing import Optional, Tuple
import litellm
from litellm._logging import verbose_logger
from litellm.secret_managers.main import get_secret_str
from ...openai.chat.gpt_transformation import OpenAIGPTConfig
class PerplexityChatConfig(OpenAIGPTConfig):
    """
    Maps OpenAI-style `/v1/chat/completions` requests onto Perplexity's
    OpenAI-compatible `/v1/chat/completions` endpoint.
    """

    @property
    def custom_llm_provider(self) -> Optional[str]:
        """Provider identifier used for routing within litellm."""
        return "perplexity"

    def _get_openai_compatible_provider_info(
        self, api_base: Optional[str], api_key: Optional[str]
    ) -> Tuple[Optional[str], Optional[str]]:
        """
        Resolve the effective API base URL and API key.

        Explicit arguments win; otherwise environment secrets are consulted,
        and the base URL finally falls back to Perplexity's public endpoint.
        Either element of the returned tuple may be None/empty if no key was
        found anywhere.
        """
        resolved_base = api_base or get_secret_str("PERPLEXITY_API_BASE")
        if not resolved_base:
            resolved_base = "https://api.perplexity.ai"

        # Check the newer PERPLEXITYAI_* name first, then the legacy one,
        # stopping at the first truthy value (matches an `or` chain).
        resolved_key = api_key
        for secret_name in ("PERPLEXITYAI_API_KEY", "PERPLEXITY_API_KEY"):
            if resolved_key:
                break
            resolved_key = get_secret_str(secret_name)

        return resolved_base, resolved_key

    def get_supported_openai_params(self, model: str) -> list:
        """
        Perplexity supports a subset of OpenAI params
        Ref: https://docs.perplexity.ai/api-reference/chat-completions
        Eg. Perplexity does not support tools, tool_choice, function_call, functions, etc.
        """
        supported = list(
            (
                "frequency_penalty",
                "max_tokens",
                "max_completion_tokens",
                "presence_penalty",
                "response_format",
                "stream",
                "temperature",
                "top_p",
                "max_retries",
                "extra_headers",
            )
        )
        # Reasoning-capable models additionally accept `reasoning_effort`;
        # the capability probe is best-effort and must never break param listing.
        try:
            if litellm.supports_reasoning(
                model=model, custom_llm_provider=self.custom_llm_provider
            ):
                supported.append("reasoning_effort")
        except Exception as e:
            verbose_logger.debug(f"Error checking if model supports reasoning: {e}")
        return supported