diff --git a/.venv/lib/python3.11/site-packages/openai/types/__pycache__/batch.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/batch.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..53b50aec5c8151f8b17dbbf272210355004dcc2f
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/batch.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/__pycache__/completion.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/completion.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6a4a3c610d18d27b7f8e5c96eccc676bac9d703b
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/completion.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/__pycache__/completion_create_params.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/completion_create_params.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7281b8ebca245fea766ac72ddea6921c79c1ae97
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/completion_create_params.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/__pycache__/completion_usage.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/completion_usage.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..31957eb456882fb4bcac70e67e4f8242f01a8e60
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/completion_usage.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/__pycache__/image.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/image.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..38043b3769656bccfab766dbe085d04d7071f717
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/image.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/__pycache__/image_create_variation_params.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/image_create_variation_params.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..8e268742f860d83d246ec93b3572b89dbcd73a7a
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/image_create_variation_params.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/__pycache__/image_edit_params.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/image_edit_params.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..9471f405e78a7ffd93b6ad253ac7f626f198eaa5
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/image_edit_params.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/__pycache__/moderation.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/moderation.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..53dafef2ffb0bb575de05a887c147345199b2f15
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/moderation.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/__pycache__/moderation_multi_modal_input_param.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/moderation_multi_modal_input_param.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..da8c0781a1a723809c8cc6671a18ab685235218a
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/moderation_multi_modal_input_param.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/__pycache__/upload_create_params.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/upload_create_params.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..597941273874d54c1eeeafa46bb03a74b816053f
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/__pycache__/upload_create_params.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/__init__.py b/.venv/lib/python3.11/site-packages/openai/types/chat/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c623a982afe1690b549b42730c14ce76d3fdce5b
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/__init__.py
@@ -0,0 +1,67 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from .chat_completion import ChatCompletion as ChatCompletion
+from .chat_completion_role import ChatCompletionRole as ChatCompletionRole
+from .chat_completion_audio import ChatCompletionAudio as ChatCompletionAudio
+from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
+from .parsed_chat_completion import (
+    ParsedChoice as ParsedChoice,
+    ParsedChatCompletion as ParsedChatCompletion,
+    ParsedChatCompletionMessage as ParsedChatCompletionMessage,
+)
+from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage
+from .chat_completion_modality import ChatCompletionModality as ChatCompletionModality
+from .completion_create_params import CompletionCreateParams as CompletionCreateParams
+from .parsed_function_tool_call import (
+    ParsedFunction as ParsedFunction,
+    ParsedFunctionToolCall as ParsedFunctionToolCall,
+)
+from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam
+from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam
+from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam
+from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
+from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort
+from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall
+from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam
+from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam
+from .chat_completion_user_message_param import ChatCompletionUserMessageParam as ChatCompletionUserMessageParam
+from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam as ChatCompletionStreamOptionsParam
+from .chat_completion_system_message_param import ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam
+from .chat_completion_function_message_param import (
+    ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam,
+)
+from .chat_completion_assistant_message_param import (
+    ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam,
+)
+from .chat_completion_content_part_text_param import (
+    ChatCompletionContentPartTextParam as ChatCompletionContentPartTextParam,
+)
+from .chat_completion_developer_message_param import (
+    ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam,
+)
+from .chat_completion_message_tool_call_param import (
+    ChatCompletionMessageToolCallParam as ChatCompletionMessageToolCallParam,
+)
+from .chat_completion_named_tool_choice_param import (
+    ChatCompletionNamedToolChoiceParam as ChatCompletionNamedToolChoiceParam,
+)
+from .chat_completion_content_part_image_param import (
+    ChatCompletionContentPartImageParam as ChatCompletionContentPartImageParam,
+)
+from .chat_completion_prediction_content_param import (
+    ChatCompletionPredictionContentParam as ChatCompletionPredictionContentParam,
+)
+from .chat_completion_tool_choice_option_param import (
+    ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam,
+)
+from .chat_completion_content_part_refusal_param import (
+    ChatCompletionContentPartRefusalParam as ChatCompletionContentPartRefusalParam,
+)
+from .chat_completion_function_call_option_param import (
+    ChatCompletionFunctionCallOptionParam as ChatCompletionFunctionCallOptionParam,
+)
+from .chat_completion_content_part_input_audio_param import (
+    ChatCompletionContentPartInputAudioParam as ChatCompletionContentPartInputAudioParam,
+)
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..b03ca4aa6e26907eabe81b35fd05377b6a6a3d4e
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_audio.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_audio.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..13b643e7c31db18c647888cc1a46c4c01c086182
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_audio.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_audio_param.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_audio_param.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..f4db8f18ba9fbe5579292855b876e782a38a72dc
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_audio_param.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_chunk.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_chunk.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..80daa8ddd0e47fa2f474f9f606a4ca0de74815c6
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_chunk.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_content_part_input_audio_param.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_content_part_input_audio_param.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..7f4c391b944d62b309bb4960f76f02df0974abd4
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_content_part_input_audio_param.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_content_part_refusal_param.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_content_part_refusal_param.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..d7283d32518d974f7dbd97c748669edcad577ad9
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_content_part_refusal_param.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_message.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_message.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..829982bae65f40a5404586c0c39ddf2fc837b1e8
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_message.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_message_param.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_message_param.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..e9e58a3f7131770d6306008d51dc9c8b7801e2ca
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_message_param.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_reasoning_effort.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_reasoning_effort.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..70600a13005c2348fd2a8ed5a58d5a5b76d2e2d2
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_reasoning_effort.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_system_message_param.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_system_message_param.cpython-311.pyc
new file mode 100644
index 0000000000000000000000000000000000000000..6818f9c3c6d271b64992d850ae7a3438208f2c3a
Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_system_message_param.cpython-311.pyc differ
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb812a2702cdc23614f5ed0855ec61c589d2bbd6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion.py
@@ -0,0 +1,73 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..completion_usage import CompletionUsage
+from .chat_completion_message import ChatCompletionMessage
+from .chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = ["ChatCompletion", "Choice", "ChoiceLogprobs"]
+
+
+class ChoiceLogprobs(BaseModel):
+    content: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message content tokens with log probability information."""
+
+    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message refusal tokens with log probability information."""
+
+
+class Choice(BaseModel):
+    finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
+    """The reason the model stopped generating tokens.
+
+    This will be `stop` if the model hit a natural stop point or a provided stop
+    sequence, `length` if the maximum number of tokens specified in the request was
+    reached, `content_filter` if content was omitted due to a flag from our content
+    filters, `tool_calls` if the model called a tool, or `function_call`
+    (deprecated) if the model called a function.
+    """
+
+    index: int
+    """The index of the choice in the list of choices."""
+
+    logprobs: Optional[ChoiceLogprobs] = None
+    """Log probability information for the choice."""
+
+    message: ChatCompletionMessage
+    """A chat completion message generated by the model."""
+
+
+class ChatCompletion(BaseModel):
+    id: str
+    """A unique identifier for the chat completion."""
+
+    choices: List[Choice]
+    """A list of chat completion choices.
+
+    Can be more than one if `n` is greater than 1.
+    """
+
+    created: int
+    """The Unix timestamp (in seconds) of when the chat completion was created."""
+
+    model: str
+    """The model used for the chat completion."""
+
+    object: Literal["chat.completion"]
+    """The object type, which is always `chat.completion`."""
+
+    service_tier: Optional[Literal["scale", "default"]] = None
+    """The service tier used for processing the request."""
+
+    system_fingerprint: Optional[str] = None
+    """This fingerprint represents the backend configuration that the model runs with.
+
+    Can be used in conjunction with the `seed` request parameter to understand when
+    backend changes have been made that might impact determinism.
+    """
+
+    usage: Optional[CompletionUsage] = None
+    """Usage statistics for the completion request."""
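For orientation, a minimal sketch of how the `ChatCompletion` model above surfaces through the SDK client; the model name and prompt are illustrative, and the client assumes `OPENAI_API_KEY` is set in the environment:

```python
from openai import OpenAI

client = OpenAI()  # reads OPENAI_API_KEY from the environment

# Returns a ChatCompletion as defined in the file above.
completion = client.chat.completions.create(
    model="gpt-4o-mini",  # illustrative model name
    messages=[{"role": "user", "content": "Say hello."}],
)

choice = completion.choices[0]
print(choice.finish_reason)    # e.g. "stop"
print(choice.message.content)  # the generated text
if completion.usage is not None:
    print(completion.usage.total_tokens)
```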
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_assistant_message_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_assistant_message_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..35e3a3d7843979bbd64d69eb6e2751dd36f39a4e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_assistant_message_param.py
@@ -0,0 +1,70 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+from .chat_completion_message_tool_call_param import ChatCompletionMessageToolCallParam
+from .chat_completion_content_part_refusal_param import ChatCompletionContentPartRefusalParam
+
+__all__ = ["ChatCompletionAssistantMessageParam", "Audio", "ContentArrayOfContentPart", "FunctionCall"]
+
+
+class Audio(TypedDict, total=False):
+    id: Required[str]
+    """Unique identifier for a previous audio response from the model."""
+
+
+ContentArrayOfContentPart: TypeAlias = Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartRefusalParam]
+
+
+class FunctionCall(TypedDict, total=False):
+    arguments: Required[str]
+    """
+    The arguments to call the function with, as generated by the model in JSON
+    format. Note that the model does not always generate valid JSON, and may
+    hallucinate parameters not defined by your function schema. Validate the
+    arguments in your code before calling your function.
+    """
+
+    name: Required[str]
+    """The name of the function to call."""
+
+
+class ChatCompletionAssistantMessageParam(TypedDict, total=False):
+    role: Required[Literal["assistant"]]
+    """The role of the messages author, in this case `assistant`."""
+
+    audio: Optional[Audio]
+    """Data about a previous audio response from the model.
+
+    [Learn more](https://platform.openai.com/docs/guides/audio).
+    """
+
+    content: Union[str, Iterable[ContentArrayOfContentPart], None]
+    """The contents of the assistant message.
+
+    Required unless `tool_calls` or `function_call` is specified.
+    """
+
+    function_call: Optional[FunctionCall]
+    """Deprecated and replaced by `tool_calls`.
+
+    The name and arguments of a function that should be called, as generated by the
+    model.
+    """
+
+    name: str
+    """An optional name for the participant.
+
+    Provides the model information to differentiate between participants of the same
+    role.
+    """
+
+    refusal: Optional[str]
+    """The refusal message by the assistant."""
+
+    tool_calls: Iterable[ChatCompletionMessageToolCallParam]
+    """The tool calls generated by the model, such as function calls."""
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_audio.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_audio.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd15508ebbebc27aadddc803e32c9c2a158ac99c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_audio.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionAudio"]
+
+
+class ChatCompletionAudio(BaseModel):
+    id: str
+    """Unique identifier for this audio response."""
+
+    data: str
+    """
+    Base64 encoded audio bytes generated by the model, in the format specified in
+    the request.
+    """
+
+    expires_at: int
+    """
+    The Unix timestamp (in seconds) for when this audio response will no longer be
+    accessible on the server for use in multi-turn conversations.
+    """
+
+    transcript: str
+    """Transcript of the audio generated by the model."""
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_audio_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_audio_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e20a52b41e782779269a51c674d7fea49be7b8c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_audio_param.py
@@ -0,0 +1,23 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionAudioParam"]
+
+
+class ChatCompletionAudioParam(TypedDict, total=False):
+    format: Required[Literal["wav", "mp3", "flac", "opus", "pcm16"]]
+    """Specifies the output audio format.
+
+    Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`.
+    """
+
+    voice: Required[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
+    """The voice the model uses to respond.
+
+    Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also
+    supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices
+    are less expressive).
+    """
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_chunk.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_chunk.py
new file mode 100644
index 0000000000000000000000000000000000000000..dede513f1e1693bebb811aa5d068b562467b959c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_chunk.py
@@ -0,0 +1,147 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from ..completion_usage import CompletionUsage
+from .chat_completion_token_logprob import ChatCompletionTokenLogprob
+
+__all__ = [
+    "ChatCompletionChunk",
+    "Choice",
+    "ChoiceDelta",
+    "ChoiceDeltaFunctionCall",
+    "ChoiceDeltaToolCall",
+    "ChoiceDeltaToolCallFunction",
+    "ChoiceLogprobs",
+]
+
+
+class ChoiceDeltaFunctionCall(BaseModel):
+    arguments: Optional[str] = None
+    """
+    The arguments to call the function with, as generated by the model in JSON
+    format. Note that the model does not always generate valid JSON, and may
+    hallucinate parameters not defined by your function schema. Validate the
+    arguments in your code before calling your function.
+    """
+
+    name: Optional[str] = None
+    """The name of the function to call."""
+
+
+class ChoiceDeltaToolCallFunction(BaseModel):
+    arguments: Optional[str] = None
+    """
+    The arguments to call the function with, as generated by the model in JSON
+    format. Note that the model does not always generate valid JSON, and may
+    hallucinate parameters not defined by your function schema. Validate the
+    arguments in your code before calling your function.
+    """
+
+    name: Optional[str] = None
+    """The name of the function to call."""
+
+
+class ChoiceDeltaToolCall(BaseModel):
+    index: int
+
+    id: Optional[str] = None
+    """The ID of the tool call."""
+
+    function: Optional[ChoiceDeltaToolCallFunction] = None
+
+    type: Optional[Literal["function"]] = None
+    """The type of the tool. Currently, only `function` is supported."""
+
+
+class ChoiceDelta(BaseModel):
+    content: Optional[str] = None
+    """The contents of the chunk message."""
+
+    function_call: Optional[ChoiceDeltaFunctionCall] = None
+    """Deprecated and replaced by `tool_calls`.
+
+    The name and arguments of a function that should be called, as generated by the
+    model.
+    """
+
+    refusal: Optional[str] = None
+    """The refusal message generated by the model."""
+
+    role: Optional[Literal["developer", "system", "user", "assistant", "tool"]] = None
+    """The role of the author of this message."""
+
+    tool_calls: Optional[List[ChoiceDeltaToolCall]] = None
+
+
+class ChoiceLogprobs(BaseModel):
+    content: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message content tokens with log probability information."""
+
+    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
+    """A list of message refusal tokens with log probability information."""
+
+
+class Choice(BaseModel):
+    delta: ChoiceDelta
+    """A chat completion delta generated by streamed model responses."""
+
+    finish_reason: Optional[Literal["stop", "length", "tool_calls", "content_filter", "function_call"]] = None
+    """The reason the model stopped generating tokens.
+
+    This will be `stop` if the model hit a natural stop point or a provided stop
+    sequence, `length` if the maximum number of tokens specified in the request was
+    reached, `content_filter` if content was omitted due to a flag from our content
+    filters, `tool_calls` if the model called a tool, or `function_call`
+    (deprecated) if the model called a function.
+    """
+
+    index: int
+    """The index of the choice in the list of choices."""
+
+    logprobs: Optional[ChoiceLogprobs] = None
+    """Log probability information for the choice."""
+
+
+class ChatCompletionChunk(BaseModel):
+    id: str
+    """A unique identifier for the chat completion. Each chunk has the same ID."""
+
+    choices: List[Choice]
+    """A list of chat completion choices.
+
+    Can contain more than one element if `n` is greater than 1. Can also be empty
+    for the last chunk if you set `stream_options: {"include_usage": true}`.
+    """
+
+    created: int
+    """The Unix timestamp (in seconds) of when the chat completion was created.
+
+    Each chunk has the same timestamp.
+    """
+
+    model: str
+    """The model used to generate the completion."""
+
+    object: Literal["chat.completion.chunk"]
+    """The object type, which is always `chat.completion.chunk`."""
+
+    service_tier: Optional[Literal["scale", "default"]] = None
+    """The service tier used for processing the request."""
+
+    system_fingerprint: Optional[str] = None
+    """
+    This fingerprint represents the backend configuration that the model runs with.
+    Can be used in conjunction with the `seed` request parameter to understand when
+    backend changes have been made that might impact determinism.
+    """
+
+    usage: Optional[CompletionUsage] = None
+    """
+    An optional field that will only be present when you set
+    `stream_options: {"include_usage": true}` in your request. When present, it
+    contains a null value except for the last chunk which contains the token usage
+    statistics for the entire request.
+    """
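A short sketch of consuming these chunks, including the `include_usage` behavior documented above; the model name and prompt are illustrative:

```python
from openai import OpenAI

client = OpenAI()

stream = client.chat.completions.create(
    model="gpt-4o-mini",  # illustrative
    messages=[{"role": "user", "content": "Count to three."}],
    stream=True,
    stream_options={"include_usage": True},
)

parts: list[str] = []
for chunk in stream:  # each item is a ChatCompletionChunk
    if chunk.choices:  # the final usage-only chunk has an empty choices list
        delta = chunk.choices[0].delta
        if delta.content:
            parts.append(delta.content)
    if chunk.usage is not None:  # only set on the last chunk
        print("total tokens:", chunk.usage.total_tokens)

print("".join(parts))
```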
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_image_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_image_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d407324d078a4cc094156e749aacd80a5cdd265
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_image_param.py
@@ -0,0 +1,26 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionContentPartImageParam", "ImageURL"]
+
+
+class ImageURL(TypedDict, total=False):
+    url: Required[str]
+    """Either a URL of the image or the base64 encoded image data."""
+
+    detail: Literal["auto", "low", "high"]
+    """Specifies the detail level of the image.
+
+    Learn more in the
+    [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
+    """
+
+
+class ChatCompletionContentPartImageParam(TypedDict, total=False):
+    image_url: Required[ImageURL]
+
+    type: Required[Literal["image_url"]]
+    """The type of the content part."""
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_input_audio_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_input_audio_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..0b1b1a80b101fc9de06d45fd1a630da2127f1132
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_input_audio_param.py
@@ -0,0 +1,22 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionContentPartInputAudioParam", "InputAudio"]
+
+
+class InputAudio(TypedDict, total=False):
+    data: Required[str]
+    """Base64 encoded audio data."""
+
+    format: Required[Literal["wav", "mp3"]]
+    """The format of the encoded audio data. Currently supports "wav" and "mp3"."""
+
+
+class ChatCompletionContentPartInputAudioParam(TypedDict, total=False):
+    input_audio: Required[InputAudio]
+
+    type: Required[Literal["input_audio"]]
+    """The type of the content part. Always `input_audio`."""
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..682d11f4c732cb4249f4ff41256e4437f088d4b4
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+from .chat_completion_content_part_image_param import ChatCompletionContentPartImageParam
+from .chat_completion_content_part_input_audio_param import ChatCompletionContentPartInputAudioParam
+
+__all__ = ["ChatCompletionContentPartParam"]
+
+ChatCompletionContentPartParam: TypeAlias = Union[
+    ChatCompletionContentPartTextParam, ChatCompletionContentPartImageParam, ChatCompletionContentPartInputAudioParam
+]
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_refusal_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_refusal_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..c18c7db770d6051f29f4ea8ef6a355007d1fe24c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_refusal_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionContentPartRefusalParam"]
+
+
+class ChatCompletionContentPartRefusalParam(TypedDict, total=False):
+    refusal: Required[str]
+    """The refusal message generated by the model."""
+
+    type: Required[Literal["refusal"]]
+    """The type of the content part."""
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_text_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_text_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..a270744417f80d53119823c1adc17a458ac600ec
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_text_param.py
@@ -0,0 +1,15 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionContentPartTextParam"]
+
+
+class ChatCompletionContentPartTextParam(TypedDict, total=False):
+    text: Required[str]
+    """The text content."""
+
+    type: Required[Literal["text"]]
+    """The type of the content part."""
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_developer_message_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_developer_message_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..01e4fdb6547d3e94a471ff1dcc228e1bef09fd6a
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_developer_message_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
+__all__ = ["ChatCompletionDeveloperMessageParam"]
+
+
+class ChatCompletionDeveloperMessageParam(TypedDict, total=False):
+    content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
+    """The contents of the developer message."""
+
+    role: Required[Literal["developer"]]
+    """The role of the messages author, in this case `developer`."""
+
+    name: str
+    """An optional name for the participant.
+
+    Provides the model information to differentiate between participants of the same
+    role.
+    """
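The content-part union above is what multimodal message payloads are built from; a minimal sketch, with a placeholder image URL:

```python
from typing import List

from openai.types.chat import ChatCompletionContentPartParam

# A text part plus an image part, as accepted in a user message's `content`.
parts: List[ChatCompletionContentPartParam] = [
    {"type": "text", "text": "What is in this image?"},
    {
        "type": "image_url",
        "image_url": {"url": "https://example.com/photo.png", "detail": "low"},  # placeholder URL
    },
]
```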
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_function_call_option_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_function_call_option_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..2bc014af7a5f78d0c0c5627dcb9ae558e581b958
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_function_call_option_param.py
@@ -0,0 +1,12 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Required, TypedDict
+
+__all__ = ["ChatCompletionFunctionCallOptionParam"]
+
+
+class ChatCompletionFunctionCallOptionParam(TypedDict, total=False):
+    name: Required[str]
+    """The name of the function to call."""
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_function_message_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_function_message_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..5af12bf94faaba1b66a7c6104227ac0ea5aa38c0
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_function_message_param.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Optional
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionFunctionMessageParam"]
+
+
+class ChatCompletionFunctionMessageParam(TypedDict, total=False):
+    content: Required[Optional[str]]
+    """The contents of the function message."""
+
+    name: Required[str]
+    """The name of the function to call."""
+
+    role: Required[Literal["function"]]
+    """The role of the messages author, in this case `function`."""
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message.py
new file mode 100644
index 0000000000000000000000000000000000000000..704fa5d5d14b968a7c56e384bdc7c6c8ad249136
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message.py
@@ -0,0 +1,51 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+from .chat_completion_audio import ChatCompletionAudio
+from .chat_completion_message_tool_call import ChatCompletionMessageToolCall
+
+__all__ = ["ChatCompletionMessage", "FunctionCall"]
+
+
+class FunctionCall(BaseModel):
+    arguments: str
+    """
+    The arguments to call the function with, as generated by the model in JSON
+    format. Note that the model does not always generate valid JSON, and may
+    hallucinate parameters not defined by your function schema. Validate the
+    arguments in your code before calling your function.
+    """
+
+    name: str
+    """The name of the function to call."""
+
+
+class ChatCompletionMessage(BaseModel):
+    content: Optional[str] = None
+    """The contents of the message."""
+
+    refusal: Optional[str] = None
+    """The refusal message generated by the model."""
+
+    role: Literal["assistant"]
+    """The role of the author of this message."""
+
+    audio: Optional[ChatCompletionAudio] = None
+    """
+    If the audio output modality is requested, this object contains data about the
+    audio response from the model.
+    [Learn more](https://platform.openai.com/docs/guides/audio).
+    """
+
+    function_call: Optional[FunctionCall] = None
+    """Deprecated and replaced by `tool_calls`.
+
+    The name and arguments of a function that should be called, as generated by the
+    model.
+    """
+
+    tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None
+    """The tool calls generated by the model, such as function calls."""
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..942da243041d34e67a9f5ccde0d2c739f27ff8f1
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_param.py
@@ -0,0 +1,24 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import TypeAlias
+
+from .chat_completion_tool_message_param import ChatCompletionToolMessageParam
+from .chat_completion_user_message_param import ChatCompletionUserMessageParam
+from .chat_completion_system_message_param import ChatCompletionSystemMessageParam
+from .chat_completion_function_message_param import ChatCompletionFunctionMessageParam
+from .chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam
+from .chat_completion_developer_message_param import ChatCompletionDeveloperMessageParam
+
+__all__ = ["ChatCompletionMessageParam"]
+
+ChatCompletionMessageParam: TypeAlias = Union[
+    ChatCompletionDeveloperMessageParam,
+    ChatCompletionSystemMessageParam,
+    ChatCompletionUserMessageParam,
+    ChatCompletionAssistantMessageParam,
+    ChatCompletionToolMessageParam,
+    ChatCompletionFunctionMessageParam,
+]
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_tool_call.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_tool_call.py
new file mode 100644
index 0000000000000000000000000000000000000000..4fec667096f3c6cf5e9bd9a94d9fcacf203f0c77
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_tool_call.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionMessageToolCall", "Function"]
+
+
+class Function(BaseModel):
+    arguments: str
+    """
+    The arguments to call the function with, as generated by the model in JSON
+    format. Note that the model does not always generate valid JSON, and may
+    hallucinate parameters not defined by your function schema. Validate the
+    arguments in your code before calling your function.
+    """
+
+    name: str
+    """The name of the function to call."""
+
+
+class ChatCompletionMessageToolCall(BaseModel):
+    id: str
+    """The ID of the tool call."""
+
+    function: Function
+    """The function that the model called."""
+
+    type: Literal["function"]
+    """The type of the tool. Currently, only `function` is supported."""
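A sketch of how a returned tool call is typically unpacked; `get_weather` is a hypothetical local function, and the `ChatCompletion` is assumed to come from a prior tools-enabled request:

```python
import json

from openai.types.chat import ChatCompletion


def get_weather(city: str) -> str:  # hypothetical tool implementation
    return f"Sunny in {city}"


def handle_tool_calls(completion: ChatCompletion) -> list[dict]:
    """Turn tool calls from a tools-enabled response into `tool` role replies."""
    replies: list[dict] = []
    message = completion.choices[0].message
    for call in message.tool_calls or []:  # ChatCompletionMessageToolCall items
        args = json.loads(call.function.arguments)  # model-generated JSON; validate before use
        replies.append({"role": "tool", "tool_call_id": call.id, "content": get_weather(**args)})
    return replies
```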
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_tool_call_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_tool_call_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..f616c363d01cac5f419766bd4610f3e9a5c5e9fb
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_tool_call_param.py
@@ -0,0 +1,31 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionMessageToolCallParam", "Function"]
+
+
+class Function(TypedDict, total=False):
+    arguments: Required[str]
+    """
+    The arguments to call the function with, as generated by the model in JSON
+    format. Note that the model does not always generate valid JSON, and may
+    hallucinate parameters not defined by your function schema. Validate the
+    arguments in your code before calling your function.
+    """
+
+    name: Required[str]
+    """The name of the function to call."""
+
+
+class ChatCompletionMessageToolCallParam(TypedDict, total=False):
+    id: Required[str]
+    """The ID of the tool call."""
+
+    function: Required[Function]
+    """The function that the model called."""
+
+    type: Required[Literal["function"]]
+    """The type of the tool. Currently, only `function` is supported."""
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_modality.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_modality.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e3c1459790bad30989afe41f995eef4341f6f7c
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_modality.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ChatCompletionModality"]
+
+ChatCompletionModality: TypeAlias = Literal["text", "audio"]
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_named_tool_choice_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_named_tool_choice_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..369f8b42dd4ffe179464e7fd32f22065baf94b3f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_named_tool_choice_param.py
@@ -0,0 +1,19 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+__all__ = ["ChatCompletionNamedToolChoiceParam", "Function"]
+
+
+class Function(TypedDict, total=False):
+    name: Required[str]
+    """The name of the function to call."""
+
+
+class ChatCompletionNamedToolChoiceParam(TypedDict, total=False):
+    function: Required[Function]
+
+    type: Required[Literal["function"]]
+    """The type of the tool. Currently, only `function` is supported."""
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_prediction_content_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_prediction_content_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..c44e6e365360c8ae90019e1d0d9cb356265a368f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_prediction_content_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
+__all__ = ["ChatCompletionPredictionContentParam"]
+
+
+class ChatCompletionPredictionContentParam(TypedDict, total=False):
+    content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
+    """
+    The content that should be matched when generating a model response. If
+    generated tokens would match this content, the entire model response can be
+    returned much more quickly.
+    """
+
+    type: Required[Literal["content"]]
+    """The type of the predicted content you want to provide.
+
+    This type is currently always `content`.
+    """
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_reasoning_effort.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_reasoning_effort.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e7946974a9da182aeecc63e30a0e0e64ad692d1
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_reasoning_effort.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ChatCompletionReasoningEffort"]
+
+ChatCompletionReasoningEffort: TypeAlias = Literal["low", "medium", "high"]
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_role.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_role.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ec5e9ad87754b5b95e4500b1a8ee2f8d7f31c18
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_role.py
@@ -0,0 +1,7 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing_extensions import Literal, TypeAlias
+
+__all__ = ["ChatCompletionRole"]
+
+ChatCompletionRole: TypeAlias = Literal["developer", "system", "user", "assistant", "tool", "function"]
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_stream_options_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_stream_options_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..fbf72918211515cab9becb82b725b1afd203125f
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_stream_options_param.py
@@ -0,0 +1,17 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import TypedDict
+
+__all__ = ["ChatCompletionStreamOptionsParam"]
+
+
+class ChatCompletionStreamOptionsParam(TypedDict, total=False):
+    include_usage: bool
+    """If set, an additional chunk will be streamed before the `data: [DONE]` message.
+
+    The `usage` field on this chunk shows the token usage statistics for the entire
+    request, and the `choices` field will always be an empty array. All other chunks
+    will also include a `usage` field, but with a null value.
+    """
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_system_message_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_system_message_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..172ccea09effaa1334c3c774a047461e2df69caf
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_system_message_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
+__all__ = ["ChatCompletionSystemMessageParam"]
+
+
+class ChatCompletionSystemMessageParam(TypedDict, total=False):
+    content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
+    """The contents of the system message."""
+
+    role: Required[Literal["system"]]
+    """The role of the messages author, in this case `system`."""
+
+    name: str
+    """An optional name for the participant.
+
+    Provides the model information to differentiate between participants of the same
+    role.
+    """
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_token_logprob.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_token_logprob.py
new file mode 100644
index 0000000000000000000000000000000000000000..c69e258910d2390b98345a21ee52c78b966c4d76
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_token_logprob.py
@@ -0,0 +1,57 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from typing import List, Optional
+
+from ..._models import BaseModel
+
+__all__ = ["ChatCompletionTokenLogprob", "TopLogprob"]
+
+
+class TopLogprob(BaseModel):
+    token: str
+    """The token."""
+
+    bytes: Optional[List[int]] = None
+    """A list of integers representing the UTF-8 bytes representation of the token.
+
+    Useful in instances where characters are represented by multiple tokens and
+    their byte representations must be combined to generate the correct text
+    representation. Can be `null` if there is no bytes representation for the token.
+    """
+
+    logprob: float
+    """The log probability of this token, if it is within the top 20 most likely
+    tokens.
+
+    Otherwise, the value `-9999.0` is used to signify that the token is very
+    unlikely.
+    """
+
+
+class ChatCompletionTokenLogprob(BaseModel):
+    token: str
+    """The token."""
+
+    bytes: Optional[List[int]] = None
+    """A list of integers representing the UTF-8 bytes representation of the token.
+
+    Useful in instances where characters are represented by multiple tokens and
+    their byte representations must be combined to generate the correct text
+    representation. Can be `null` if there is no bytes representation for the token.
+    """
+
+    logprob: float
+    """The log probability of this token, if it is within the top 20 most likely
+    tokens.
+
+    Otherwise, the value `-9999.0` is used to signify that the token is very
+    unlikely.
+    """
+
+    top_logprobs: List[TopLogprob]
+    """List of the most likely tokens and their log probability, at this token
+    position.
+
+    In rare cases, there may be fewer than the number of requested `top_logprobs`
+    returned.
+    """
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_choice_option_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_choice_option_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..7dedf041b797befe982983b313730b2b8994a85e
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_choice_option_param.py
@@ -0,0 +1,14 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union
+from typing_extensions import Literal, TypeAlias
+
+from .chat_completion_named_tool_choice_param import ChatCompletionNamedToolChoiceParam
+
+__all__ = ["ChatCompletionToolChoiceOptionParam"]
+
+ChatCompletionToolChoiceOptionParam: TypeAlias = Union[
+    Literal["none", "auto", "required"], ChatCompletionNamedToolChoiceParam
+]
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_message_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_message_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..eb5e270e475fab7c329545fa90d0836352b2a2a9
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_message_param.py
@@ -0,0 +1,21 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
+
+__all__ = ["ChatCompletionToolMessageParam"]
+
+
+class ChatCompletionToolMessageParam(TypedDict, total=False):
+    content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
+    """The contents of the tool message."""
+
+    role: Required[Literal["tool"]]
+    """The role of the messages author, in this case `tool`."""
+
+    tool_call_id: Required[str]
+    """Tool call that this message is responding to."""
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..6c2b1a36f0da8446ebe3b15652f697c067eb60db
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_param.py
@@ -0,0 +1,16 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing_extensions import Literal, Required, TypedDict
+
+from ..shared_params.function_definition import FunctionDefinition
+
+__all__ = ["ChatCompletionToolParam"]
+
+
+class ChatCompletionToolParam(TypedDict, total=False):
+    function: Required[FunctionDefinition]
+
+    type: Required[Literal["function"]]
+    """The type of the tool. Currently, only `function` is supported."""
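A sketch of how `ChatCompletionToolParam` and the named tool choice above compose; the tool name, description, and schema are illustrative:

```python
from openai.types.chat import (
    ChatCompletionNamedToolChoiceParam,
    ChatCompletionToolParam,
)

weather_tool: ChatCompletionToolParam = {
    "type": "function",
    "function": {
        "name": "get_weather",  # illustrative tool name
        "description": "Look up the current weather for a city.",
        "parameters": {
            "type": "object",
            "properties": {"city": {"type": "string"}},
            "required": ["city"],
        },
    },
}

# Forces the model to call get_weather rather than letting it pick.
forced: ChatCompletionNamedToolChoiceParam = {
    "type": "function",
    "function": {"name": "get_weather"},
}
```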
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_user_message_param.py b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_user_message_param.py
new file mode 100644
index 0000000000000000000000000000000000000000..5c15322a229eb738ea00f4c5c8bd458272f123fa
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_user_message_param.py
@@ -0,0 +1,25 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Union, Iterable
+from typing_extensions import Literal, Required, TypedDict
+
+from .chat_completion_content_part_param import ChatCompletionContentPartParam
+
+__all__ = ["ChatCompletionUserMessageParam"]
+
+
+class ChatCompletionUserMessageParam(TypedDict, total=False):
+    content: Required[Union[str, Iterable[ChatCompletionContentPartParam]]]
+    """The contents of the user message."""
+
+    role: Required[Literal["user"]]
+    """The role of the messages author, in this case `user`."""
+
+    name: str
+    """An optional name for the participant.
+
+    Provides the model information to differentiate between participants of the same
+    role.
+    """
diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/completion_create_params.py b/.venv/lib/python3.11/site-packages/openai/types/chat/completion_create_params.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec88ea1fb0bb71ba5895ce6c07ca4bffd4b2bdd6
--- /dev/null
+++ b/.venv/lib/python3.11/site-packages/openai/types/chat/completion_create_params.py
@@ -0,0 +1,352 @@
+# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+
+from __future__ import annotations
+
+from typing import Dict, List, Union, Iterable, Optional
+from typing_extensions import Literal, Required, TypeAlias, TypedDict
+
+from ..chat_model import ChatModel
+from ..shared_params.metadata import Metadata
+from .chat_completion_modality import ChatCompletionModality
+from .chat_completion_tool_param import ChatCompletionToolParam
+from .chat_completion_audio_param import ChatCompletionAudioParam
+from .chat_completion_message_param import ChatCompletionMessageParam
+from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort
+from ..shared_params.function_parameters import FunctionParameters
+from ..shared_params.response_format_text import ResponseFormatText
+from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam
+from .chat_completion_prediction_content_param import ChatCompletionPredictionContentParam
+from .chat_completion_tool_choice_option_param import ChatCompletionToolChoiceOptionParam
+from ..shared_params.response_format_json_object import ResponseFormatJSONObject
+from ..shared_params.response_format_json_schema import ResponseFormatJSONSchema
+from .chat_completion_function_call_option_param import ChatCompletionFunctionCallOptionParam
+
+__all__ = [
+    "CompletionCreateParamsBase",
+    "FunctionCall",
+    "Function",
+    "ResponseFormat",
+    "CompletionCreateParamsNonStreaming",
+    "CompletionCreateParamsStreaming",
+]
+
+
+class CompletionCreateParamsBase(TypedDict, total=False):
+    messages: Required[Iterable[ChatCompletionMessageParam]]
+    """A list of messages comprising the conversation so far.
+
+    Depending on the [model](https://platform.openai.com/docs/models) you use,
+    different message types (modalities) are supported, like
+    [text](https://platform.openai.com/docs/guides/text-generation),
+    [images](https://platform.openai.com/docs/guides/vision), and
+    [audio](https://platform.openai.com/docs/guides/audio).
+    """
+
+    model: Required[Union[str, ChatModel]]
+    """ID of the model to use.
+
+    See the
+    [model endpoint compatibility](https://platform.openai.com/docs/models#model-endpoint-compatibility)
+    table for details on which models work with the Chat API.
+    """
+
+    audio: Optional[ChatCompletionAudioParam]
+    """Parameters for audio output.
+
+    Required when audio output is requested with `modalities: ["audio"]`.
+    [Learn more](https://platform.openai.com/docs/guides/audio).
+    """
+
+    frequency_penalty: Optional[float]
+    """Number between -2.0 and 2.0.
+
+    Positive values penalize new tokens based on their existing frequency in the
+    text so far, decreasing the model's likelihood to repeat the same line verbatim.
+    """
+
+    function_call: FunctionCall
+    """Deprecated in favor of `tool_choice`.
+
+    Controls which (if any) function is called by the model.
+
+    `none` means the model will not call a function and instead generates a message.
+
+    `auto` means the model can pick between generating a message or calling a
+    function.
+
+    Specifying a particular function via `{"name": "my_function"}` forces the model
+    to call that function.
+
+    `none` is the default when no functions are present. `auto` is the default if
+    functions are present.
+    """
+
+    functions: Iterable[Function]
+    """Deprecated in favor of `tools`.
+
+    A list of functions the model may generate JSON inputs for.
+    """
+
+    logit_bias: Optional[Dict[str, int]]
+    """Modify the likelihood of specified tokens appearing in the completion.
+
+    Accepts a JSON object that maps tokens (specified by their token ID in the
+    tokenizer) to an associated bias value from -100 to 100. Mathematically, the
+    bias is added to the logits generated by the model prior to sampling. The exact
+    effect will vary per model, but values between -1 and 1 should decrease or
+    increase likelihood of selection; values like -100 or 100 should result in a ban
+    or exclusive selection of the relevant token.
+    """
+
+    logprobs: Optional[bool]
+    """Whether to return log probabilities of the output tokens or not.
+
+    If true, returns the log probabilities of each output token returned in the
+    `content` of `message`.
+    """
+
+    max_completion_tokens: Optional[int]
+    """
+    An upper bound for the number of tokens that can be generated for a completion,
+    including visible output tokens and
+    [reasoning tokens](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    max_tokens: Optional[int]
+    """
+    The maximum number of [tokens](/tokenizer) that can be generated in the chat
+    completion. This value can be used to control
+    [costs](https://openai.com/api/pricing/) for text generated via API.
+
+    This value is now deprecated in favor of `max_completion_tokens`, and is not
+    compatible with
+    [o1 series models](https://platform.openai.com/docs/guides/reasoning).
+    """
+
+    metadata: Optional[Metadata]
+    """Set of 16 key-value pairs that can be attached to an object.
+
+    This can be useful for storing additional information about the object in a
+    structured format, and querying for objects via API or the dashboard.
+
+    Keys are strings with a maximum length of 64 characters. Values are strings with
+    a maximum length of 512 characters.
+ """ + + modalities: Optional[List[ChatCompletionModality]] + """ + Output types that you would like the model to generate for this request. Most + models are capable of generating text, which is the default: + + `["text"]` + + The `gpt-4o-audio-preview` model can also be used to + [generate audio](https://platform.openai.com/docs/guides/audio). To request that + this model generate both text and audio responses, you can use: + + `["text", "audio"]` + """ + + n: Optional[int] + """How many chat completion choices to generate for each input message. + + Note that you will be charged based on the number of generated tokens across all + of the choices. Keep `n` as `1` to minimize costs. + """ + + parallel_tool_calls: bool + """ + Whether to enable + [parallel function calling](https://platform.openai.com/docs/guides/function-calling#configuring-parallel-function-calling) + during tool use. + """ + + prediction: Optional[ChatCompletionPredictionContentParam] + """ + Static predicted output content, such as the content of a text file that is + being regenerated. + """ + + presence_penalty: Optional[float] + """Number between -2.0 and 2.0. + + Positive values penalize new tokens based on whether they appear in the text so + far, increasing the model's likelihood to talk about new topics. + """ + + reasoning_effort: ChatCompletionReasoningEffort + """**o1 models only** + + Constrains effort on reasoning for + [reasoning models](https://platform.openai.com/docs/guides/reasoning). Currently + supported values are `low`, `medium`, and `high`. Reducing reasoning effort can + result in faster responses and fewer tokens used on reasoning in a response. + """ + + response_format: ResponseFormat + """An object specifying the format that the model must output. + + Setting to `{ "type": "json_schema", "json_schema": {...} }` enables Structured + Outputs which ensures the model will match your supplied JSON schema. Learn more + in the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + + Setting to `{ "type": "json_object" }` enables JSON mode, which ensures the + message the model generates is valid JSON. + + **Important:** when using JSON mode, you **must** also instruct the model to + produce JSON yourself via a system or user message. Without this, the model may + generate an unending stream of whitespace until the generation reaches the token + limit, resulting in a long-running and seemingly "stuck" request. Also note that + the message content may be partially cut off if `finish_reason="length"`, which + indicates the generation exceeded `max_tokens` or the conversation exceeded the + max context length. + """ + + seed: Optional[int] + """ + This feature is in Beta. If specified, our system will make a best effort to + sample deterministically, such that repeated requests with the same `seed` and + parameters should return the same result. Determinism is not guaranteed, and you + should refer to the `system_fingerprint` response parameter to monitor changes + in the backend. + """ + + service_tier: Optional[Literal["auto", "default"]] + """Specifies the latency tier to use for processing the request. + + This parameter is relevant for customers subscribed to the scale tier service: + + - If set to 'auto', and the Project is Scale tier enabled, the system will + utilize scale tier credits until they are exhausted. 
- If set to 'auto', and the Project is not Scale tier enabled, the request will + be processed using the default service tier with a lower uptime SLA and no + latency guarantee. + - If set to 'default', the request will be processed using the default service + tier with a lower uptime SLA and no latency guarantee. + - When not set, the default behavior is 'auto'. + """ + + stop: Union[Optional[str], List[str]] + """Up to 4 sequences where the API will stop generating further tokens.""" + + store: Optional[bool] + """ + Whether or not to store the output of this chat completion request for use in + our [model distillation](https://platform.openai.com/docs/guides/distillation) + or [evals](https://platform.openai.com/docs/guides/evals) products. + """ + + stream_options: Optional[ChatCompletionStreamOptionsParam] + """Options for streaming response. Only set this when you set `stream: true`.""" + + temperature: Optional[float] + """What sampling temperature to use, between 0 and 2. + + Higher values like 0.8 will make the output more random, while lower values like + 0.2 will make it more focused and deterministic. We generally recommend altering + this or `top_p` but not both. + """ + + tool_choice: ChatCompletionToolChoiceOptionParam + """ + Controls which (if any) tool is called by the model. `none` means the model will + not call any tool and instead generates a message. `auto` means the model can + pick between generating a message or calling one or more tools. `required` means + the model must call one or more tools. Specifying a particular tool via + `{"type": "function", "function": {"name": "my_function"}}` forces the model to + call that tool. + + `none` is the default when no tools are present. `auto` is the default if tools + are present. + """ + + tools: Iterable[ChatCompletionToolParam] + """A list of tools the model may call. + + Currently, only functions are supported as a tool. Use this to provide a list of + functions the model may generate JSON inputs for. A maximum of 128 functions is + supported. + """ + + top_logprobs: Optional[int] + """ + An integer between 0 and 20 specifying the number of most likely tokens to + return at each token position, each with an associated log probability. + `logprobs` must be set to `true` if this parameter is used. + """ + + top_p: Optional[float] + """ + An alternative to sampling with temperature, called nucleus sampling, where the + model considers the results of the tokens with top_p probability mass. So 0.1 + means only the tokens comprising the top 10% probability mass are considered. + + We generally recommend altering this or `temperature` but not both. + """ + + user: str + """ + A unique identifier representing your end-user, which can help OpenAI to monitor + and detect abuse. + [Learn more](https://platform.openai.com/docs/guides/safety-best-practices#end-user-ids). + """ + + +FunctionCall: TypeAlias = Union[Literal["none", "auto"], ChatCompletionFunctionCallOptionParam] + + +class Function(TypedDict, total=False): + name: Required[str] + """The name of the function to be called. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: str + """ + A description of what the function does, used by the model to choose when and + how to call the function. + """ + + parameters: FunctionParameters + """The parameters the function accepts, described as a JSON Schema object.
+ + See the [guide](https://platform.openai.com/docs/guides/function-calling) for + examples, and the + [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + documentation about the format. + + Omitting `parameters` defines a function with an empty parameter list. + """ + + +ResponseFormat: TypeAlias = Union[ResponseFormatText, ResponseFormatJSONObject, ResponseFormatJSONSchema] + + +class CompletionCreateParamsNonStreaming(CompletionCreateParamsBase, total=False): + stream: Optional[Literal[False]] + """If set, partial message deltas will be sent, like in ChatGPT. + + Tokens will be sent as data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` + message. + [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + """ + + +class CompletionCreateParamsStreaming(CompletionCreateParamsBase): + stream: Required[Literal[True]] + """If set, partial message deltas will be sent, like in ChatGPT. + + Tokens will be sent as data-only + [server-sent events](https://developer.mozilla.org/en-US/docs/Web/API/Server-sent_events/Using_server-sent_events#Event_stream_format) + as they become available, with the stream terminated by a `data: [DONE]` + message. + [Example Python code](https://cookbook.openai.com/examples/how_to_stream_completions). + """ + + +CompletionCreateParams = Union[CompletionCreateParamsNonStreaming, CompletionCreateParamsStreaming] diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/parsed_chat_completion.py b/.venv/lib/python3.11/site-packages/openai/types/chat/parsed_chat_completion.py new file mode 100644 index 0000000000000000000000000000000000000000..4b11dac5a0d0c66e205c769d19f800205211cf5e --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/chat/parsed_chat_completion.py @@ -0,0 +1,40 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import List, Generic, TypeVar, Optional + +from ..._models import GenericModel +from .chat_completion import Choice, ChatCompletion +from .chat_completion_message import ChatCompletionMessage +from .parsed_function_tool_call import ParsedFunctionToolCall + +__all__ = ["ParsedChatCompletion", "ParsedChoice"] + + +ContentType = TypeVar("ContentType") + + +# we need to disable this check because we're overriding properties +# with subclasses of their types which is technically unsound as +# properties can be mutated. +# pyright: reportIncompatibleVariableOverride=false + + +class ParsedChatCompletionMessage(ChatCompletionMessage, GenericModel, Generic[ContentType]): + parsed: Optional[ContentType] = None + """The auto-parsed message contents""" + + tool_calls: Optional[List[ParsedFunctionToolCall]] = None # type: ignore[assignment] + """The tool calls generated by the model, such as function calls.""" + + +class ParsedChoice(Choice, GenericModel, Generic[ContentType]): + message: ParsedChatCompletionMessage[ContentType] + """A chat completion message generated by the model.""" + + +class ParsedChatCompletion(ChatCompletion, GenericModel, Generic[ContentType]): + choices: List[ParsedChoice[ContentType]] # type: ignore[assignment] + """A list of chat completion choices. + + Can be more than one if `n` is greater than 1. 
+ """ diff --git a/.venv/lib/python3.11/site-packages/openai/types/chat/parsed_function_tool_call.py b/.venv/lib/python3.11/site-packages/openai/types/chat/parsed_function_tool_call.py new file mode 100644 index 0000000000000000000000000000000000000000..3e90789f8567c66b28d554a0b3143f20bb48a2b2 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/chat/parsed_function_tool_call.py @@ -0,0 +1,29 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from .chat_completion_message_tool_call import Function, ChatCompletionMessageToolCall + +__all__ = ["ParsedFunctionToolCall", "ParsedFunction"] + +# we need to disable this check because we're overriding properties +# with subclasses of their types which is technically unsound as +# properties can be mutated. +# pyright: reportIncompatibleVariableOverride=false + + +class ParsedFunction(Function): + parsed_arguments: Optional[object] = None + """ + The arguments to call the function with. + + If you used `openai.pydantic_function_tool()` then this will be an + instance of the given `BaseModel`. + + Otherwise, this will be the parsed JSON arguments. + """ + + +class ParsedFunctionToolCall(ChatCompletionMessageToolCall): + function: ParsedFunction + """The function that the model called.""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared/__init__.py b/.venv/lib/python3.11/site-packages/openai/types/shared/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..74bf3049041f42d5caf0e2cc73676fcdf1a357e7 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/shared/__init__.py @@ -0,0 +1,9 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from .metadata import Metadata as Metadata +from .error_object import ErrorObject as ErrorObject +from .function_definition import FunctionDefinition as FunctionDefinition +from .function_parameters import FunctionParameters as FunctionParameters +from .response_format_text import ResponseFormatText as ResponseFormatText +from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject +from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared/__pycache__/function_definition.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/shared/__pycache__/function_definition.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6dda1eea5fdfefa191a7095aed500496eecd65b7 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/shared/__pycache__/function_definition.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared/function_definition.py b/.venv/lib/python3.11/site-packages/openai/types/shared/function_definition.py new file mode 100644 index 0000000000000000000000000000000000000000..06baa2317047b5b4829d8512625a5483416ebe7c --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/shared/function_definition.py @@ -0,0 +1,43 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing import Optional + +from ..._models import BaseModel +from .function_parameters import FunctionParameters + +__all__ = ["FunctionDefinition"] + + +class FunctionDefinition(BaseModel): + name: str + """The name of the function to be called. 
+ + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: Optional[str] = None + """ + A description of what the function does, used by the model to choose when and + how to call the function. + """ + + parameters: Optional[FunctionParameters] = None + """The parameters the function accepts, described as a JSON Schema object. + + See the [guide](https://platform.openai.com/docs/guides/function-calling) for + examples, and the + [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + documentation about the format. + + Omitting `parameters` defines a function with an empty parameter list. + """ + + strict: Optional[bool] = None + """Whether to enable strict schema adherence when generating the function call. + + If set to true, the model will follow the exact schema defined in the + `parameters` field. Only a subset of JSON Schema is supported when `strict` is + `true`. Learn more about Structured Outputs in the + [function calling guide](https://platform.openai.com/docs/guides/function-calling). + """ diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared/response_format_text.py b/.venv/lib/python3.11/site-packages/openai/types/shared/response_format_text.py new file mode 100644 index 0000000000000000000000000000000000000000..6721fe097309af7e99f19d7873a0f330231c0feb --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/shared/response_format_text.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from typing_extensions import Literal + +from ..._models import BaseModel + +__all__ = ["ResponseFormatText"] + + +class ResponseFormatText(BaseModel): + type: Literal["text"] + """The type of response format being defined: `text`""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared_params/__init__.py b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..68a8db75fe45c74edfab754a0f69f91f52674c38 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__init__.py @@ -0,0 +1,8 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+ +from .metadata import Metadata as Metadata +from .function_definition import FunctionDefinition as FunctionDefinition +from .function_parameters import FunctionParameters as FunctionParameters +from .response_format_text import ResponseFormatText as ResponseFormatText +from .response_format_json_object import ResponseFormatJSONObject as ResponseFormatJSONObject +from .response_format_json_schema import ResponseFormatJSONSchema as ResponseFormatJSONSchema diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..0ad3870dd45c9272306ab164d67554fe231fd938 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/function_definition.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/function_definition.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..99268fcbc715bcc805ef81aa4f6ce063edc2b71a Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/function_definition.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/function_parameters.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/function_parameters.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..1a94d5379353d7dd854369482d873b397ebd4155 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/function_parameters.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/metadata.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/metadata.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..4011bbc93366034f21ed84b0e38d8f5dbd86c656 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/metadata.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/response_format_json_object.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/response_format_json_object.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..dd45d482458b0175dab1e95a16d77859da79c755 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/response_format_json_object.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/response_format_json_schema.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/response_format_json_schema.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f553cd6f5e3844580ff1f43bf95c0155b287839d Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/response_format_json_schema.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/response_format_text.cpython-311.pyc 
b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/response_format_text.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..6c601430057c5c7c699772b5e321802fb9a04843 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/shared_params/__pycache__/response_format_text.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared_params/function_definition.py b/.venv/lib/python3.11/site-packages/openai/types/shared_params/function_definition.py new file mode 100644 index 0000000000000000000000000000000000000000..d45ec13f1e8e723e101546661684f89593de0627 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/shared_params/function_definition.py @@ -0,0 +1,45 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Optional +from typing_extensions import Required, TypedDict + +from .function_parameters import FunctionParameters + +__all__ = ["FunctionDefinition"] + + +class FunctionDefinition(TypedDict, total=False): + name: Required[str] + """The name of the function to be called. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: str + """ + A description of what the function does, used by the model to choose when and + how to call the function. + """ + + parameters: FunctionParameters + """The parameters the function accepts, described as a JSON Schema object. + + See the [guide](https://platform.openai.com/docs/guides/function-calling) for + examples, and the + [JSON Schema reference](https://json-schema.org/understanding-json-schema/) for + documentation about the format. + + Omitting `parameters` defines a function with an empty parameter list. + """ + + strict: Optional[bool] + """Whether to enable strict schema adherence when generating the function call. + + If set to true, the model will follow the exact schema defined in the + `parameters` field. Only a subset of JSON Schema is supported when `strict` is + `true`. Learn more about Structured Outputs in the + [function calling guide](https://platform.openai.com/docs/guides/function-calling). + """ diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared_params/function_parameters.py b/.venv/lib/python3.11/site-packages/openai/types/shared_params/function_parameters.py new file mode 100644 index 0000000000000000000000000000000000000000..45fc742d3ba84a7ec508b1122fe106c23f484a78 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/shared_params/function_parameters.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict +from typing_extensions import TypeAlias + +__all__ = ["FunctionParameters"] + +FunctionParameters: TypeAlias = Dict[str, object] diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared_params/metadata.py b/.venv/lib/python3.11/site-packages/openai/types/shared_params/metadata.py new file mode 100644 index 0000000000000000000000000000000000000000..821650b48b0210e86c02d8964bd5f7d427767c14 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/shared_params/metadata.py @@ -0,0 +1,10 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
+ +from __future__ import annotations + +from typing import Dict +from typing_extensions import TypeAlias + +__all__ = ["Metadata"] + +Metadata: TypeAlias = Dict[str, str] diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared_params/response_format_json_object.py b/.venv/lib/python3.11/site-packages/openai/types/shared_params/response_format_json_object.py new file mode 100644 index 0000000000000000000000000000000000000000..8419c6cb56e44e70a2fd0115800c78f3643a5287 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/shared_params/response_format_json_object.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFormatJSONObject"] + + +class ResponseFormatJSONObject(TypedDict, total=False): + type: Required[Literal["json_object"]] + """The type of response format being defined: `json_object`""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared_params/response_format_json_schema.py b/.venv/lib/python3.11/site-packages/openai/types/shared_params/response_format_json_schema.py new file mode 100644 index 0000000000000000000000000000000000000000..4b60fae8ee5c8523fbbb1bf0188b1bb4be820590 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/shared_params/response_format_json_schema.py @@ -0,0 +1,42 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from typing import Dict, Optional +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFormatJSONSchema", "JSONSchema"] + + +class JSONSchema(TypedDict, total=False): + name: Required[str] + """The name of the response format. + + Must be a-z, A-Z, 0-9, or contain underscores and dashes, with a maximum length + of 64. + """ + + description: str + """ + A description of what the response format is for, used by the model to determine + how to respond in the format. + """ + + schema: Dict[str, object] + """The schema for the response format, described as a JSON Schema object.""" + + strict: Optional[bool] + """Whether to enable strict schema adherence when generating the output. + + If set to true, the model will always follow the exact schema defined in the + `schema` field. Only a subset of JSON Schema is supported when `strict` is + `true`. To learn more, read the + [Structured Outputs guide](https://platform.openai.com/docs/guides/structured-outputs). + """ + + +class ResponseFormatJSONSchema(TypedDict, total=False): + json_schema: Required[JSONSchema] + + type: Required[Literal["json_schema"]] + """The type of response format being defined: `json_schema`""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/shared_params/response_format_text.py b/.venv/lib/python3.11/site-packages/openai/types/shared_params/response_format_text.py new file mode 100644 index 0000000000000000000000000000000000000000..5bec7fc503d2f83161560d0ce6a7cdb786f4eb87 --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/shared_params/response_format_text.py @@ -0,0 +1,12 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. 
+ +from __future__ import annotations + +from typing_extensions import Literal, Required, TypedDict + +__all__ = ["ResponseFormatText"] + + +class ResponseFormatText(TypedDict, total=False): + type: Required[Literal["text"]] + """The type of response format being defined: `text`""" diff --git a/.venv/lib/python3.11/site-packages/openai/types/uploads/__init__.py b/.venv/lib/python3.11/site-packages/openai/types/uploads/__init__.py new file mode 100644 index 0000000000000000000000000000000000000000..41deb0ab4bf9c1987cbe67c34b010f30902dea4d --- /dev/null +++ b/.venv/lib/python3.11/site-packages/openai/types/uploads/__init__.py @@ -0,0 +1,6 @@ +# File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details. + +from __future__ import annotations + +from .upload_part import UploadPart as UploadPart +from .part_create_params import PartCreateParams as PartCreateParams diff --git a/.venv/lib/python3.11/site-packages/openai/types/uploads/__pycache__/__init__.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/uploads/__pycache__/__init__.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..5128d46409212dbd022072e14a79fe9b03e51e68 Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/uploads/__pycache__/__init__.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/uploads/__pycache__/part_create_params.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/uploads/__pycache__/part_create_params.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..98724153608291e5feac0ee3c4f3edd4f45cdb0a Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/uploads/__pycache__/part_create_params.cpython-311.pyc differ diff --git a/.venv/lib/python3.11/site-packages/openai/types/uploads/__pycache__/upload_part.cpython-311.pyc b/.venv/lib/python3.11/site-packages/openai/types/uploads/__pycache__/upload_part.cpython-311.pyc new file mode 100644 index 0000000000000000000000000000000000000000..f92c856a8ae0c78a755eb459af052c26dedff53f Binary files /dev/null and b/.venv/lib/python3.11/site-packages/openai/types/uploads/__pycache__/upload_part.cpython-311.pyc differ
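
Taken together, the chat param types added in this diff (ChatCompletionUserMessageParam, CompletionCreateParams, and the Function/FunctionDefinition shapes) describe the payload accepted by the chat completions endpoint; because they are TypedDicts, plain dict literals satisfy them at runtime. Below is a minimal sketch of how they compose at a call site, assuming the vendored `openai` package is importable and OPENAI_API_KEY is set in the environment; the model name, function name, and schema are illustrative, not taken from the diff:

    from openai import OpenAI
    from openai.types.chat import (
        ChatCompletionToolParam,
        ChatCompletionUserMessageParam,
    )

    client = OpenAI()  # reads OPENAI_API_KEY from the environment

    # `content` may be a plain string or an iterable of content parts.
    message: ChatCompletionUserMessageParam = {
        "role": "user",
        "content": "What is the weather in Paris?",
    }

    # A tool wraps a function definition; `parameters` is a JSON Schema
    # object (FunctionParameters is just Dict[str, object]).
    tool: ChatCompletionToolParam = {
        "type": "function",
        "function": {
            "name": "get_weather",  # illustrative name
            "description": "Look up the current weather for a city.",
            "parameters": {
                "type": "object",
                "properties": {"city": {"type": "string"}},
                "required": ["city"],
            },
        },
    }

    completion = client.chat.completions.create(
        model="gpt-4o",  # illustrative; any ID compatible with ChatModel
        messages=[message],
        tools=[tool],
        tool_choice="auto",  # the documented default when tools are present
    )
    print(completion.choices[0].message)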
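
CompletionCreateParamsNonStreaming and CompletionCreateParamsStreaming differ only in the `stream` literal, which is what lets the SDK overload `create()` to return either a finished completion or an iterator of chunks. Continuing the sketch above (same client, same assumptions):

    for chunk in client.chat.completions.create(
        model="gpt-4o",  # illustrative
        messages=[{"role": "user", "content": "Tell me a short story."}],
        stream=True,  # selects the CompletionCreateParamsStreaming overload
        stream_options={"include_usage": True},  # optional; see the stream_options docstring
    ):
        # With include_usage, the final chunk carries usage and an empty
        # choices list, hence the guard.
        if chunk.choices and chunk.choices[0].delta.content:
            print(chunk.choices[0].delta.content, end="")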
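
The ResponseFormatText / ResponseFormatJSONObject / ResponseFormatJSONSchema shapes in shared_params are what the `response_format` field of CompletionCreateParamsBase accepts. A sketch of the `json_schema` variant, with an illustrative schema and name; note that per the response_format docstring, the `json_object` variant additionally requires telling the model, in a system or user message, to emit JSON:

    completion = client.chat.completions.create(
        model="gpt-4o",  # illustrative
        messages=[{"role": "user", "content": "It is 18 degrees C in Paris. Extract the data."}],
        response_format={
            "type": "json_schema",
            "json_schema": {
                "name": "city_weather",  # illustrative
                "strict": True,
                "schema": {
                    "type": "object",
                    "properties": {
                        "city": {"type": "string"},
                        "temperature_c": {"type": "number"},
                    },
                    "required": ["city", "temperature_c"],
                    "additionalProperties": False,
                },
            },
        },
    )
    print(completion.choices[0].message.content)  # valid JSON matching the schema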
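
ParsedChatCompletion, ParsedChoice, ParsedChatCompletionMessage, and ParsedFunctionToolCall are the generic subclasses that carry auto-parsed content via the `parsed` and `parsed_arguments` fields noted in their docstrings. A sketch of how they typically surface, assuming the `client.beta.chat.completions.parse()` helper shipped in SDK versions that include these types; `CityWeather` is an illustrative Pydantic model:

    from pydantic import BaseModel

    class CityWeather(BaseModel):
        city: str
        temperature_c: float

    result = client.beta.chat.completions.parse(
        model="gpt-4o",  # illustrative
        messages=[{"role": "user", "content": "It is 18 degrees C in Paris. Extract the data."}],
        response_format=CityWeather,  # converted to a json_schema response format
    )
    weather = result.choices[0].message.parsed  # Optional[CityWeather]
    print(weather)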