koichi12 commited on
Commit
344d5fe
·
verified ·
1 Parent(s): bdb9fb6

Add files using upload-large-folder tool

Browse files
This view is limited to 50 files because the commit contains too many changes. See the raw diff for the full change set.
Files changed (50) hide show
  1. .venv/lib/python3.11/site-packages/openai/types/__pycache__/batch.cpython-311.pyc +0 -0
  2. .venv/lib/python3.11/site-packages/openai/types/__pycache__/completion.cpython-311.pyc +0 -0
  3. .venv/lib/python3.11/site-packages/openai/types/__pycache__/completion_create_params.cpython-311.pyc +0 -0
  4. .venv/lib/python3.11/site-packages/openai/types/__pycache__/completion_usage.cpython-311.pyc +0 -0
  5. .venv/lib/python3.11/site-packages/openai/types/__pycache__/image.cpython-311.pyc +0 -0
  6. .venv/lib/python3.11/site-packages/openai/types/__pycache__/image_create_variation_params.cpython-311.pyc +0 -0
  7. .venv/lib/python3.11/site-packages/openai/types/__pycache__/image_edit_params.cpython-311.pyc +0 -0
  8. .venv/lib/python3.11/site-packages/openai/types/__pycache__/moderation.cpython-311.pyc +0 -0
  9. .venv/lib/python3.11/site-packages/openai/types/__pycache__/moderation_multi_modal_input_param.cpython-311.pyc +0 -0
  10. .venv/lib/python3.11/site-packages/openai/types/__pycache__/upload_create_params.cpython-311.pyc +0 -0
  11. .venv/lib/python3.11/site-packages/openai/types/chat/__init__.py +67 -0
  12. .venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion.cpython-311.pyc +0 -0
  13. .venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_audio.cpython-311.pyc +0 -0
  14. .venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_audio_param.cpython-311.pyc +0 -0
  15. .venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_chunk.cpython-311.pyc +0 -0
  16. .venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_content_part_input_audio_param.cpython-311.pyc +0 -0
  17. .venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_content_part_refusal_param.cpython-311.pyc +0 -0
  18. .venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_message.cpython-311.pyc +0 -0
  19. .venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_message_param.cpython-311.pyc +0 -0
  20. .venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_reasoning_effort.cpython-311.pyc +0 -0
  21. .venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_system_message_param.cpython-311.pyc +0 -0
  22. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion.py +73 -0
  23. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_assistant_message_param.py +70 -0
  24. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_audio.py +26 -0
  25. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_audio_param.py +23 -0
  26. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_chunk.py +147 -0
  27. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_image_param.py +26 -0
  28. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_input_audio_param.py +22 -0
  29. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_param.py +16 -0
  30. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_refusal_param.py +15 -0
  31. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_text_param.py +15 -0
  32. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_developer_message_param.py +25 -0
  33. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_function_call_option_param.py +12 -0
  34. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_function_message_param.py +19 -0
  35. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message.py +51 -0
  36. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_param.py +24 -0
  37. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_tool_call.py +31 -0
  38. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_tool_call_param.py +31 -0
  39. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_modality.py +7 -0
  40. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_named_tool_choice_param.py +19 -0
  41. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_prediction_content_param.py +25 -0
  42. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_reasoning_effort.py +7 -0
  43. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_role.py +7 -0
  44. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_stream_options_param.py +17 -0
  45. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_system_message_param.py +25 -0
  46. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_token_logprob.py +57 -0
  47. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_choice_option_param.py +14 -0
  48. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_message_param.py +21 -0
  49. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_param.py +16 -0
  50. .venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_user_message_param.py +25 -0
.venv/lib/python3.11/site-packages/openai/types/__pycache__/batch.cpython-311.pyc ADDED
Binary file (2.58 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/__pycache__/completion.cpython-311.pyc ADDED
Binary file (1.21 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/__pycache__/completion_create_params.cpython-311.pyc ADDED
Binary file (2.64 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/__pycache__/completion_usage.cpython-311.pyc ADDED
Binary file (1.84 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/__pycache__/image.cpython-311.pyc ADDED
Binary file (791 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/types/__pycache__/image_create_variation_params.cpython-311.pyc ADDED
Binary file (1.29 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/__pycache__/image_edit_params.cpython-311.pyc ADDED
Binary file (1.36 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/__pycache__/moderation.cpython-311.pyc ADDED
Binary file (4.87 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/__pycache__/moderation_multi_modal_input_param.cpython-311.pyc ADDED
Binary file (737 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/types/__pycache__/upload_create_params.cpython-311.pyc ADDED
Binary file (926 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/types/chat/__init__.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from .chat_completion import ChatCompletion as ChatCompletion
6
+ from .chat_completion_role import ChatCompletionRole as ChatCompletionRole
7
+ from .chat_completion_audio import ChatCompletionAudio as ChatCompletionAudio
8
+ from .chat_completion_chunk import ChatCompletionChunk as ChatCompletionChunk
9
+ from .parsed_chat_completion import (
10
+ ParsedChoice as ParsedChoice,
11
+ ParsedChatCompletion as ParsedChatCompletion,
12
+ ParsedChatCompletionMessage as ParsedChatCompletionMessage,
13
+ )
14
+ from .chat_completion_message import ChatCompletionMessage as ChatCompletionMessage
15
+ from .chat_completion_modality import ChatCompletionModality as ChatCompletionModality
16
+ from .completion_create_params import CompletionCreateParams as CompletionCreateParams
17
+ from .parsed_function_tool_call import (
18
+ ParsedFunction as ParsedFunction,
19
+ ParsedFunctionToolCall as ParsedFunctionToolCall,
20
+ )
21
+ from .chat_completion_tool_param import ChatCompletionToolParam as ChatCompletionToolParam
22
+ from .chat_completion_audio_param import ChatCompletionAudioParam as ChatCompletionAudioParam
23
+ from .chat_completion_message_param import ChatCompletionMessageParam as ChatCompletionMessageParam
24
+ from .chat_completion_token_logprob import ChatCompletionTokenLogprob as ChatCompletionTokenLogprob
25
+ from .chat_completion_reasoning_effort import ChatCompletionReasoningEffort as ChatCompletionReasoningEffort
26
+ from .chat_completion_message_tool_call import ChatCompletionMessageToolCall as ChatCompletionMessageToolCall
27
+ from .chat_completion_content_part_param import ChatCompletionContentPartParam as ChatCompletionContentPartParam
28
+ from .chat_completion_tool_message_param import ChatCompletionToolMessageParam as ChatCompletionToolMessageParam
29
+ from .chat_completion_user_message_param import ChatCompletionUserMessageParam as ChatCompletionUserMessageParam
30
+ from .chat_completion_stream_options_param import ChatCompletionStreamOptionsParam as ChatCompletionStreamOptionsParam
31
+ from .chat_completion_system_message_param import ChatCompletionSystemMessageParam as ChatCompletionSystemMessageParam
32
+ from .chat_completion_function_message_param import (
33
+ ChatCompletionFunctionMessageParam as ChatCompletionFunctionMessageParam,
34
+ )
35
+ from .chat_completion_assistant_message_param import (
36
+ ChatCompletionAssistantMessageParam as ChatCompletionAssistantMessageParam,
37
+ )
38
+ from .chat_completion_content_part_text_param import (
39
+ ChatCompletionContentPartTextParam as ChatCompletionContentPartTextParam,
40
+ )
41
+ from .chat_completion_developer_message_param import (
42
+ ChatCompletionDeveloperMessageParam as ChatCompletionDeveloperMessageParam,
43
+ )
44
+ from .chat_completion_message_tool_call_param import (
45
+ ChatCompletionMessageToolCallParam as ChatCompletionMessageToolCallParam,
46
+ )
47
+ from .chat_completion_named_tool_choice_param import (
48
+ ChatCompletionNamedToolChoiceParam as ChatCompletionNamedToolChoiceParam,
49
+ )
50
+ from .chat_completion_content_part_image_param import (
51
+ ChatCompletionContentPartImageParam as ChatCompletionContentPartImageParam,
52
+ )
53
+ from .chat_completion_prediction_content_param import (
54
+ ChatCompletionPredictionContentParam as ChatCompletionPredictionContentParam,
55
+ )
56
+ from .chat_completion_tool_choice_option_param import (
57
+ ChatCompletionToolChoiceOptionParam as ChatCompletionToolChoiceOptionParam,
58
+ )
59
+ from .chat_completion_content_part_refusal_param import (
60
+ ChatCompletionContentPartRefusalParam as ChatCompletionContentPartRefusalParam,
61
+ )
62
+ from .chat_completion_function_call_option_param import (
63
+ ChatCompletionFunctionCallOptionParam as ChatCompletionFunctionCallOptionParam,
64
+ )
65
+ from .chat_completion_content_part_input_audio_param import (
66
+ ChatCompletionContentPartInputAudioParam as ChatCompletionContentPartInputAudioParam,
67
+ )
.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion.cpython-311.pyc ADDED
Binary file (2.36 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_audio.cpython-311.pyc ADDED
Binary file (688 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_audio_param.cpython-311.pyc ADDED
Binary file (919 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_chunk.cpython-311.pyc ADDED
Binary file (4.1 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_content_part_input_audio_param.cpython-311.pyc ADDED
Binary file (1.19 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_content_part_refusal_param.cpython-311.pyc ADDED
Binary file (835 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_message.cpython-311.pyc ADDED
Binary file (1.64 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_message_param.cpython-311.pyc ADDED
Binary file (1.24 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_reasoning_effort.cpython-311.pyc ADDED
Binary file (456 Bytes). View file
 
.venv/lib/python3.11/site-packages/openai/types/chat/__pycache__/chat_completion_system_message_param.cpython-311.pyc ADDED
Binary file (1.11 kB). View file
 
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion.py ADDED
@@ -0,0 +1,73 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import List, Optional
4
+ from typing_extensions import Literal
5
+
6
+ from ..._models import BaseModel
7
+ from ..completion_usage import CompletionUsage
8
+ from .chat_completion_message import ChatCompletionMessage
9
+ from .chat_completion_token_logprob import ChatCompletionTokenLogprob
10
+
11
+ __all__ = ["ChatCompletion", "Choice", "ChoiceLogprobs"]
12
+
13
+
14
class ChoiceLogprobs(BaseModel):
    """Log probability information attached to a single completion choice."""

    content: Optional[List[ChatCompletionTokenLogprob]] = None
    """A list of message content tokens with log probability information."""

    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
    """A list of message refusal tokens with log probability information."""
20
+
21
+
22
class Choice(BaseModel):
    """One completion choice returned inside a `ChatCompletion` response."""

    finish_reason: Literal["stop", "length", "tool_calls", "content_filter", "function_call"]
    """The reason the model stopped generating tokens.

    This will be `stop` if the model hit a natural stop point or a provided stop
    sequence, `length` if the maximum number of tokens specified in the request was
    reached, `content_filter` if content was omitted due to a flag from our content
    filters, `tool_calls` if the model called a tool, or `function_call`
    (deprecated) if the model called a function.
    """

    index: int
    """The index of the choice in the list of choices."""

    # Only populated when logprobs were requested; otherwise None.
    logprobs: Optional[ChoiceLogprobs] = None
    """Log probability information for the choice."""

    message: ChatCompletionMessage
    """A chat completion message generated by the model."""
41
+
42
+
43
class ChatCompletion(BaseModel):
    """A (non-streaming) chat completion response returned by the model."""

    id: str
    """A unique identifier for the chat completion."""

    choices: List[Choice]
    """A list of chat completion choices.

    Can be more than one if `n` is greater than 1.
    """

    created: int
    """The Unix timestamp (in seconds) of when the chat completion was created."""

    model: str
    """The model used for the chat completion."""

    object: Literal["chat.completion"]
    """The object type, which is always `chat.completion`."""

    service_tier: Optional[Literal["scale", "default"]] = None
    """The service tier used for processing the request."""

    system_fingerprint: Optional[str] = None
    """This fingerprint represents the backend configuration that the model runs with.

    Can be used in conjunction with the `seed` request parameter to understand when
    backend changes have been made that might impact determinism.
    """

    usage: Optional[CompletionUsage] = None
    """Usage statistics for the completion request."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_assistant_message_param.py ADDED
@@ -0,0 +1,70 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union, Iterable, Optional
6
+ from typing_extensions import Literal, Required, TypeAlias, TypedDict
7
+
8
+ from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
9
+ from .chat_completion_message_tool_call_param import ChatCompletionMessageToolCallParam
10
+ from .chat_completion_content_part_refusal_param import ChatCompletionContentPartRefusalParam
11
+
12
+ __all__ = ["ChatCompletionAssistantMessageParam", "Audio", "ContentArrayOfContentPart", "FunctionCall"]
13
+
14
+
15
class Audio(TypedDict, total=False):
    """Reference to a prior audio response, used in the assistant message param."""

    id: Required[str]
    """Unique identifier for a previous audio response from the model."""
18
+
19
+
20
# Assistant message content parts may be either text or refusal payloads.
ContentArrayOfContentPart: TypeAlias = Union[ChatCompletionContentPartTextParam, ChatCompletionContentPartRefusalParam]
21
+
22
+
23
class FunctionCall(TypedDict, total=False):
    """Deprecated function-call payload carried on an assistant message param."""

    arguments: Required[str]
    """
    The arguments to call the function with, as generated by the model in JSON
    format. Note that the model does not always generate valid JSON, and may
    hallucinate parameters not defined by your function schema. Validate the
    arguments in your code before calling your function.
    """

    name: Required[str]
    """The name of the function to call."""
34
+
35
+
36
class ChatCompletionAssistantMessageParam(TypedDict, total=False):
    """Request param describing a message previously produced by the assistant."""

    role: Required[Literal["assistant"]]
    """The role of the messages author, in this case `assistant`."""

    audio: Optional[Audio]
    """Data about a previous audio response from the model.

    [Learn more](https://platform.openai.com/docs/guides/audio).
    """

    content: Union[str, Iterable[ContentArrayOfContentPart], None]
    """The contents of the assistant message.

    Required unless `tool_calls` or `function_call` is specified.
    """

    function_call: Optional[FunctionCall]
    """Deprecated and replaced by `tool_calls`.

    The name and arguments of a function that should be called, as generated by the
    model.
    """

    name: str
    """An optional name for the participant.

    Provides the model information to differentiate between participants of the same
    role.
    """

    refusal: Optional[str]
    """The refusal message by the assistant."""

    tool_calls: Iterable[ChatCompletionMessageToolCallParam]
    """The tool calls generated by the model, such as function calls."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_audio.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+
4
+ from ..._models import BaseModel
5
+
6
+ __all__ = ["ChatCompletionAudio"]
7
+
8
+
9
class ChatCompletionAudio(BaseModel):
    """An audio response generated by the model for a chat completion."""

    id: str
    """Unique identifier for this audio response."""

    data: str
    """
    Base64 encoded audio bytes generated by the model, in the format specified in
    the request.
    """

    expires_at: int
    """
    The Unix timestamp (in seconds) for when this audio response will no longer be
    accessible on the server for use in multi-turn conversations.
    """

    transcript: str
    """Transcript of the audio generated by the model."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_audio_param.py ADDED
@@ -0,0 +1,23 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, Required, TypedDict
6
+
7
+ __all__ = ["ChatCompletionAudioParam"]
8
+
9
+
10
class ChatCompletionAudioParam(TypedDict, total=False):
    """Request param selecting the output audio format and voice."""

    format: Required[Literal["wav", "mp3", "flac", "opus", "pcm16"]]
    """Specifies the output audio format.

    Must be one of `wav`, `mp3`, `flac`, `opus`, or `pcm16`.
    """

    voice: Required[Literal["alloy", "ash", "ballad", "coral", "echo", "sage", "shimmer", "verse"]]
    """The voice the model uses to respond.

    Supported voices are `ash`, `ballad`, `coral`, `sage`, and `verse` (also
    supported but not recommended are `alloy`, `echo`, and `shimmer`; these voices
    are less expressive).
    """
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_chunk.py ADDED
@@ -0,0 +1,147 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import List, Optional
4
+ from typing_extensions import Literal
5
+
6
+ from ..._models import BaseModel
7
+ from ..completion_usage import CompletionUsage
8
+ from .chat_completion_token_logprob import ChatCompletionTokenLogprob
9
+
10
+ __all__ = [
11
+ "ChatCompletionChunk",
12
+ "Choice",
13
+ "ChoiceDelta",
14
+ "ChoiceDeltaFunctionCall",
15
+ "ChoiceDeltaToolCall",
16
+ "ChoiceDeltaToolCallFunction",
17
+ "ChoiceLogprobs",
18
+ ]
19
+
20
+
21
class ChoiceDeltaFunctionCall(BaseModel):
    """Streamed (partial) function-call data; fields arrive incrementally per chunk."""

    arguments: Optional[str] = None
    """
    The arguments to call the function with, as generated by the model in JSON
    format. Note that the model does not always generate valid JSON, and may
    hallucinate parameters not defined by your function schema. Validate the
    arguments in your code before calling your function.
    """

    name: Optional[str] = None
    """The name of the function to call."""
32
+
33
+
34
class ChoiceDeltaToolCallFunction(BaseModel):
    """Streamed (partial) function data inside a tool-call delta."""

    arguments: Optional[str] = None
    """
    The arguments to call the function with, as generated by the model in JSON
    format. Note that the model does not always generate valid JSON, and may
    hallucinate parameters not defined by your function schema. Validate the
    arguments in your code before calling your function.
    """

    name: Optional[str] = None
    """The name of the function to call."""
45
+
46
+
47
class ChoiceDeltaToolCall(BaseModel):
    """A streamed (partial) tool call; chunks for the same call share an index."""

    # Position identifying which tool call this fragment belongs to across chunks.
    index: int

    id: Optional[str] = None
    """The ID of the tool call."""

    function: Optional[ChoiceDeltaToolCallFunction] = None

    type: Optional[Literal["function"]] = None
    """The type of the tool. Currently, only `function` is supported."""
57
+
58
+
59
class ChoiceDelta(BaseModel):
    """The incremental message content carried by one streamed chunk."""

    content: Optional[str] = None
    """The contents of the chunk message."""

    function_call: Optional[ChoiceDeltaFunctionCall] = None
    """Deprecated and replaced by `tool_calls`.

    The name and arguments of a function that should be called, as generated by the
    model.
    """

    refusal: Optional[str] = None
    """The refusal message generated by the model."""

    role: Optional[Literal["developer", "system", "user", "assistant", "tool"]] = None
    """The role of the author of this message."""

    tool_calls: Optional[List[ChoiceDeltaToolCall]] = None
77
+
78
+
79
class ChoiceLogprobs(BaseModel):
    """Log probability information attached to a streamed choice."""

    content: Optional[List[ChatCompletionTokenLogprob]] = None
    """A list of message content tokens with log probability information."""

    refusal: Optional[List[ChatCompletionTokenLogprob]] = None
    """A list of message refusal tokens with log probability information."""
85
+
86
+
87
class Choice(BaseModel):
    """One completion choice within a streamed `ChatCompletionChunk`."""

    delta: ChoiceDelta
    """A chat completion delta generated by streamed model responses."""

    # None until the stream for this choice finishes.
    finish_reason: Optional[Literal["stop", "length", "tool_calls", "content_filter", "function_call"]] = None
    """The reason the model stopped generating tokens.

    This will be `stop` if the model hit a natural stop point or a provided stop
    sequence, `length` if the maximum number of tokens specified in the request was
    reached, `content_filter` if content was omitted due to a flag from our content
    filters, `tool_calls` if the model called a tool, or `function_call`
    (deprecated) if the model called a function.
    """

    index: int
    """The index of the choice in the list of choices."""

    logprobs: Optional[ChoiceLogprobs] = None
    """Log probability information for the choice."""
106
+
107
+
108
class ChatCompletionChunk(BaseModel):
    """One server-sent chunk of a streamed chat completion response."""

    id: str
    """A unique identifier for the chat completion. Each chunk has the same ID."""

    choices: List[Choice]
    """A list of chat completion choices.

    Can contain more than one element if `n` is greater than 1. Can also be empty
    for the last chunk if you set `stream_options: {"include_usage": true}`.
    """

    created: int
    """The Unix timestamp (in seconds) of when the chat completion was created.

    Each chunk has the same timestamp.
    """

    model: str
    """The model to generate the completion."""

    object: Literal["chat.completion.chunk"]
    """The object type, which is always `chat.completion.chunk`."""

    service_tier: Optional[Literal["scale", "default"]] = None
    """The service tier used for processing the request."""

    system_fingerprint: Optional[str] = None
    """
    This fingerprint represents the backend configuration that the model runs with.
    Can be used in conjunction with the `seed` request parameter to understand when
    backend changes have been made that might impact determinism.
    """

    usage: Optional[CompletionUsage] = None
    """
    An optional field that will only be present when you set
    `stream_options: {"include_usage": true}` in your request. When present, it
    contains a null value except for the last chunk which contains the token usage
    statistics for the entire request.
    """
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_image_param.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, Required, TypedDict
6
+
7
+ __all__ = ["ChatCompletionContentPartImageParam", "ImageURL"]
8
+
9
+
10
class ImageURL(TypedDict, total=False):
    """The image payload of an `image_url` content part."""

    url: Required[str]
    """Either a URL of the image or the base64 encoded image data."""

    detail: Literal["auto", "low", "high"]
    """Specifies the detail level of the image.

    Learn more in the
    [Vision guide](https://platform.openai.com/docs/guides/vision#low-or-high-fidelity-image-understanding).
    """
20
+
21
+
22
class ChatCompletionContentPartImageParam(TypedDict, total=False):
    """An image content part of a chat message request param."""

    image_url: Required[ImageURL]

    type: Required[Literal["image_url"]]
    """The type of the content part."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_input_audio_param.py ADDED
@@ -0,0 +1,22 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, Required, TypedDict
6
+
7
+ __all__ = ["ChatCompletionContentPartInputAudioParam", "InputAudio"]
8
+
9
+
10
class InputAudio(TypedDict, total=False):
    """The audio payload of an `input_audio` content part."""

    data: Required[str]
    """Base64 encoded audio data."""

    format: Required[Literal["wav", "mp3"]]
    """The format of the encoded audio data. Currently supports "wav" and "mp3"."""
16
+
17
+
18
class ChatCompletionContentPartInputAudioParam(TypedDict, total=False):
    """An audio content part of a chat message request param."""

    input_audio: Required[InputAudio]

    type: Required[Literal["input_audio"]]
    """The type of the content part. Always `input_audio`."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_param.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union
6
+ from typing_extensions import TypeAlias
7
+
8
+ from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
9
+ from .chat_completion_content_part_image_param import ChatCompletionContentPartImageParam
10
+ from .chat_completion_content_part_input_audio_param import ChatCompletionContentPartInputAudioParam
11
+
12
+ __all__ = ["ChatCompletionContentPartParam"]
13
+
14
# Union of every content-part payload accepted in a chat message: text, image, or audio.
ChatCompletionContentPartParam: TypeAlias = Union[
    ChatCompletionContentPartTextParam, ChatCompletionContentPartImageParam, ChatCompletionContentPartInputAudioParam
]
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_refusal_param.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, Required, TypedDict
6
+
7
+ __all__ = ["ChatCompletionContentPartRefusalParam"]
8
+
9
+
10
class ChatCompletionContentPartRefusalParam(TypedDict, total=False):
    """A refusal content part of a chat message request param."""

    refusal: Required[str]
    """The refusal message generated by the model."""

    type: Required[Literal["refusal"]]
    """The type of the content part."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_content_part_text_param.py ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, Required, TypedDict
6
+
7
+ __all__ = ["ChatCompletionContentPartTextParam"]
8
+
9
+
10
class ChatCompletionContentPartTextParam(TypedDict, total=False):
    """A plain-text content part of a chat message request param."""

    text: Required[str]
    """The text content."""

    type: Required[Literal["text"]]
    """The type of the content part."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_developer_message_param.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union, Iterable
6
+ from typing_extensions import Literal, Required, TypedDict
7
+
8
+ from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
9
+
10
+ __all__ = ["ChatCompletionDeveloperMessageParam"]
11
+
12
+
13
class ChatCompletionDeveloperMessageParam(TypedDict, total=False):
    """Request param for a `developer`-role message."""

    content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
    """The contents of the developer message."""

    role: Required[Literal["developer"]]
    """The role of the messages author, in this case `developer`."""

    name: str
    """An optional name for the participant.

    Provides the model information to differentiate between participants of the same
    role.
    """
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_function_call_option_param.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Required, TypedDict
6
+
7
+ __all__ = ["ChatCompletionFunctionCallOptionParam"]
8
+
9
+
10
class ChatCompletionFunctionCallOptionParam(TypedDict, total=False):
    """Request param forcing the model to call a specific named function."""

    name: Required[str]
    """The name of the function to call."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_function_message_param.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Optional
6
+ from typing_extensions import Literal, Required, TypedDict
7
+
8
+ __all__ = ["ChatCompletionFunctionMessageParam"]
9
+
10
+
11
+ class ChatCompletionFunctionMessageParam(TypedDict, total=False):
12
+ content: Required[Optional[str]]
13
+ """The contents of the function message."""
14
+
15
+ name: Required[str]
16
+ """The name of the function to call."""
17
+
18
+ role: Required[Literal["function"]]
19
+ """The role of the messages author, in this case `function`."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message.py ADDED
@@ -0,0 +1,51 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import List, Optional
4
+ from typing_extensions import Literal
5
+
6
+ from ..._models import BaseModel
7
+ from .chat_completion_audio import ChatCompletionAudio
8
+ from .chat_completion_message_tool_call import ChatCompletionMessageToolCall
9
+
10
+ __all__ = ["ChatCompletionMessage", "FunctionCall"]
11
+
12
+
13
+ class FunctionCall(BaseModel):
14
+ arguments: str
15
+ """
16
+ The arguments to call the function with, as generated by the model in JSON
17
+ format. Note that the model does not always generate valid JSON, and may
18
+ hallucinate parameters not defined by your function schema. Validate the
19
+ arguments in your code before calling your function.
20
+ """
21
+
22
+ name: str
23
+ """The name of the function to call."""
24
+
25
+
26
+ class ChatCompletionMessage(BaseModel):
27
+ content: Optional[str] = None
28
+ """The contents of the message."""
29
+
30
+ refusal: Optional[str] = None
31
+ """The refusal message generated by the model."""
32
+
33
+ role: Literal["assistant"]
34
+ """The role of the author of this message."""
35
+
36
+ audio: Optional[ChatCompletionAudio] = None
37
+ """
38
+ If the audio output modality is requested, this object contains data about the
39
+ audio response from the model.
40
+ [Learn more](https://platform.openai.com/docs/guides/audio).
41
+ """
42
+
43
+ function_call: Optional[FunctionCall] = None
44
+ """Deprecated and replaced by `tool_calls`.
45
+
46
+ The name and arguments of a function that should be called, as generated by the
47
+ model.
48
+ """
49
+
50
+ tool_calls: Optional[List[ChatCompletionMessageToolCall]] = None
51
+ """The tool calls generated by the model, such as function calls."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_param.py ADDED
@@ -0,0 +1,24 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union
6
+ from typing_extensions import TypeAlias
7
+
8
+ from .chat_completion_tool_message_param import ChatCompletionToolMessageParam
9
+ from .chat_completion_user_message_param import ChatCompletionUserMessageParam
10
+ from .chat_completion_system_message_param import ChatCompletionSystemMessageParam
11
+ from .chat_completion_function_message_param import ChatCompletionFunctionMessageParam
12
+ from .chat_completion_assistant_message_param import ChatCompletionAssistantMessageParam
13
+ from .chat_completion_developer_message_param import ChatCompletionDeveloperMessageParam
14
+
15
+ __all__ = ["ChatCompletionMessageParam"]
16
+
17
+ ChatCompletionMessageParam: TypeAlias = Union[
18
+ ChatCompletionDeveloperMessageParam,
19
+ ChatCompletionSystemMessageParam,
20
+ ChatCompletionUserMessageParam,
21
+ ChatCompletionAssistantMessageParam,
22
+ ChatCompletionToolMessageParam,
23
+ ChatCompletionFunctionMessageParam,
24
+ ]
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_tool_call.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing_extensions import Literal
4
+
5
+ from ..._models import BaseModel
6
+
7
+ __all__ = ["ChatCompletionMessageToolCall", "Function"]
8
+
9
+
10
+ class Function(BaseModel):
11
+ arguments: str
12
+ """
13
+ The arguments to call the function with, as generated by the model in JSON
14
+ format. Note that the model does not always generate valid JSON, and may
15
+ hallucinate parameters not defined by your function schema. Validate the
16
+ arguments in your code before calling your function.
17
+ """
18
+
19
+ name: str
20
+ """The name of the function to call."""
21
+
22
+
23
+ class ChatCompletionMessageToolCall(BaseModel):
24
+ id: str
25
+ """The ID of the tool call."""
26
+
27
+ function: Function
28
+ """The function that the model called."""
29
+
30
+ type: Literal["function"]
31
+ """The type of the tool. Currently, only `function` is supported."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_message_tool_call_param.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, Required, TypedDict
6
+
7
+ __all__ = ["ChatCompletionMessageToolCallParam", "Function"]
8
+
9
+
10
+ class Function(TypedDict, total=False):
11
+ arguments: Required[str]
12
+ """
13
+ The arguments to call the function with, as generated by the model in JSON
14
+ format. Note that the model does not always generate valid JSON, and may
15
+ hallucinate parameters not defined by your function schema. Validate the
16
+ arguments in your code before calling your function.
17
+ """
18
+
19
+ name: Required[str]
20
+ """The name of the function to call."""
21
+
22
+
23
+ class ChatCompletionMessageToolCallParam(TypedDict, total=False):
24
+ id: Required[str]
25
+ """The ID of the tool call."""
26
+
27
+ function: Required[Function]
28
+ """The function that the model called."""
29
+
30
+ type: Required[Literal["function"]]
31
+ """The type of the tool. Currently, only `function` is supported."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_modality.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing_extensions import Literal, TypeAlias
4
+
5
+ __all__ = ["ChatCompletionModality"]
6
+
7
+ ChatCompletionModality: TypeAlias = Literal["text", "audio"]
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_named_tool_choice_param.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, Required, TypedDict
6
+
7
+ __all__ = ["ChatCompletionNamedToolChoiceParam", "Function"]
8
+
9
+
10
+ class Function(TypedDict, total=False):
11
+ name: Required[str]
12
+ """The name of the function to call."""
13
+
14
+
15
+ class ChatCompletionNamedToolChoiceParam(TypedDict, total=False):
16
+ function: Required[Function]
17
+
18
+ type: Required[Literal["function"]]
19
+ """The type of the tool. Currently, only `function` is supported."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_prediction_content_param.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union, Iterable
6
+ from typing_extensions import Literal, Required, TypedDict
7
+
8
+ from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
9
+
10
+ __all__ = ["ChatCompletionPredictionContentParam"]
11
+
12
+
13
+ class ChatCompletionPredictionContentParam(TypedDict, total=False):
14
+ content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
15
+ """
16
+ The content that should be matched when generating a model response. If
17
+ generated tokens would match this content, the entire model response can be
18
+ returned much more quickly.
19
+ """
20
+
21
+ type: Required[Literal["content"]]
22
+ """The type of the predicted content you want to provide.
23
+
24
+ This type is currently always `content`.
25
+ """
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_reasoning_effort.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing_extensions import Literal, TypeAlias
4
+
5
+ __all__ = ["ChatCompletionReasoningEffort"]
6
+
7
+ ChatCompletionReasoningEffort: TypeAlias = Literal["low", "medium", "high"]
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_role.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing_extensions import Literal, TypeAlias
4
+
5
+ __all__ = ["ChatCompletionRole"]
6
+
7
+ ChatCompletionRole: TypeAlias = Literal["developer", "system", "user", "assistant", "tool", "function"]
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_stream_options_param.py ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import TypedDict
6
+
7
+ __all__ = ["ChatCompletionStreamOptionsParam"]
8
+
9
+
10
+ class ChatCompletionStreamOptionsParam(TypedDict, total=False):
11
+ include_usage: bool
12
+ """If set, an additional chunk will be streamed before the `data: [DONE]` message.
13
+
14
+ The `usage` field on this chunk shows the token usage statistics for the entire
15
+ request, and the `choices` field will always be an empty array. All other chunks
16
+ will also include a `usage` field, but with a null value.
17
+ """
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_system_message_param.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union, Iterable
6
+ from typing_extensions import Literal, Required, TypedDict
7
+
8
+ from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
9
+
10
+ __all__ = ["ChatCompletionSystemMessageParam"]
11
+
12
+
13
+ class ChatCompletionSystemMessageParam(TypedDict, total=False):
14
+ content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
15
+ """The contents of the system message."""
16
+
17
+ role: Required[Literal["system"]]
18
+ """The role of the messages author, in this case `system`."""
19
+
20
+ name: str
21
+ """An optional name for the participant.
22
+
23
+ Provides the model information to differentiate between participants of the same
24
+ role.
25
+ """
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_token_logprob.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from typing import List, Optional
4
+
5
+ from ..._models import BaseModel
6
+
7
+ __all__ = ["ChatCompletionTokenLogprob", "TopLogprob"]
8
+
9
+
10
+ class TopLogprob(BaseModel):
11
+ token: str
12
+ """The token."""
13
+
14
+ bytes: Optional[List[int]] = None
15
+ """A list of integers representing the UTF-8 bytes representation of the token.
16
+
17
+ Useful in instances where characters are represented by multiple tokens and
18
+ their byte representations must be combined to generate the correct text
19
+ representation. Can be `null` if there is no bytes representation for the token.
20
+ """
21
+
22
+ logprob: float
23
+ """The log probability of this token, if it is within the top 20 most likely
24
+ tokens.
25
+
26
+ Otherwise, the value `-9999.0` is used to signify that the token is very
27
+ unlikely.
28
+ """
29
+
30
+
31
+ class ChatCompletionTokenLogprob(BaseModel):
32
+ token: str
33
+ """The token."""
34
+
35
+ bytes: Optional[List[int]] = None
36
+ """A list of integers representing the UTF-8 bytes representation of the token.
37
+
38
+ Useful in instances where characters are represented by multiple tokens and
39
+ their byte representations must be combined to generate the correct text
40
+ representation. Can be `null` if there is no bytes representation for the token.
41
+ """
42
+
43
+ logprob: float
44
+ """The log probability of this token, if it is within the top 20 most likely
45
+ tokens.
46
+
47
+ Otherwise, the value `-9999.0` is used to signify that the token is very
48
+ unlikely.
49
+ """
50
+
51
+ top_logprobs: List[TopLogprob]
52
+ """List of the most likely tokens and their log probability, at this token
53
+ position.
54
+
55
+ In rare cases, there may be fewer than the number of requested `top_logprobs`
56
+ returned.
57
+ """
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_choice_option_param.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union
6
+ from typing_extensions import Literal, TypeAlias
7
+
8
+ from .chat_completion_named_tool_choice_param import ChatCompletionNamedToolChoiceParam
9
+
10
+ __all__ = ["ChatCompletionToolChoiceOptionParam"]
11
+
12
+ ChatCompletionToolChoiceOptionParam: TypeAlias = Union[
13
+ Literal["none", "auto", "required"], ChatCompletionNamedToolChoiceParam
14
+ ]
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_message_param.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union, Iterable
6
+ from typing_extensions import Literal, Required, TypedDict
7
+
8
+ from .chat_completion_content_part_text_param import ChatCompletionContentPartTextParam
9
+
10
+ __all__ = ["ChatCompletionToolMessageParam"]
11
+
12
+
13
+ class ChatCompletionToolMessageParam(TypedDict, total=False):
14
+ content: Required[Union[str, Iterable[ChatCompletionContentPartTextParam]]]
15
+ """The contents of the tool message."""
16
+
17
+ role: Required[Literal["tool"]]
18
+ """The role of the messages author, in this case `tool`."""
19
+
20
+ tool_call_id: Required[str]
21
+ """Tool call that this message is responding to."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_tool_param.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing_extensions import Literal, Required, TypedDict
6
+
7
+ from ..shared_params.function_definition import FunctionDefinition
8
+
9
+ __all__ = ["ChatCompletionToolParam"]
10
+
11
+
12
+ class ChatCompletionToolParam(TypedDict, total=False):
13
+ function: Required[FunctionDefinition]
14
+
15
+ type: Required[Literal["function"]]
16
+ """The type of the tool. Currently, only `function` is supported."""
.venv/lib/python3.11/site-packages/openai/types/chat/chat_completion_user_message_param.py ADDED
@@ -0,0 +1,25 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # File generated from our OpenAPI spec by Stainless. See CONTRIBUTING.md for details.
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import Union, Iterable
6
+ from typing_extensions import Literal, Required, TypedDict
7
+
8
+ from .chat_completion_content_part_param import ChatCompletionContentPartParam
9
+
10
+ __all__ = ["ChatCompletionUserMessageParam"]
11
+
12
+
13
+ class ChatCompletionUserMessageParam(TypedDict, total=False):
14
+ content: Required[Union[str, Iterable[ChatCompletionContentPartParam]]]
15
+ """The contents of the user message."""
16
+
17
+ role: Required[Literal["user"]]
18
+ """The role of the messages author, in this case `user`."""
19
+
20
+ name: str
21
+ """An optional name for the participant.
22
+
23
+ Provides the model information to differentiate between participants of the same
24
+ role.
25
+ """