ZTWHHH committed
Commit 19dc7a1 · verified · 1 Parent(s): 9908a54

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50):
  1. .gitattributes +1 -0
  2. infer_4_30_0/lib/python3.10/site-packages/matplotlib/__pycache__/colors.cpython-310.pyc +3 -0
  3. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/__pycache__/__init__.cpython-310.pyc +0 -0
  4. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/__pycache__/async_timeout.cpython-310.pyc +0 -0
  5. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/__pycache__/llm_engine.cpython-310.pyc +0 -0
  6. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/__pycache__/metrics.cpython-310.pyc +0 -0
  7. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/__pycache__/metrics_types.cpython-310.pyc +0 -0
  8. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/__pycache__/protocol.cpython-310.pyc +0 -0
  9. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/metrics_types.py +102 -0
  10. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/__init__.py +0 -0
  11. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/__pycache__/__init__.cpython-310.pyc +0 -0
  12. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/__pycache__/interfaces.cpython-310.pyc +0 -0
  13. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/__pycache__/multi_step.cpython-310.pyc +0 -0
  14. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/__pycache__/single_step.cpython-310.pyc +0 -0
  15. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/__pycache__/stop_checker.cpython-310.pyc +0 -0
  16. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/__pycache__/util.cpython-310.pyc +0 -0
  17. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/interfaces.py +74 -0
  18. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/multi_step.py +205 -0
  19. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/single_step.py +136 -0
  20. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/stop_checker.py +130 -0
  21. infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/util.py +27 -0
  22. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/__init__.py +0 -0
  23. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/api_server.py +170 -0
  24. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/chat_utils.py +1007 -0
  25. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/launcher.py +108 -0
  26. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/llm.py +1421 -0
  27. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/logger.py +44 -0
  28. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/__init__.py +0 -0
  29. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/__pycache__/api_server.cpython-310.pyc +0 -0
  30. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/__pycache__/serving_engine.cpython-310.pyc +0 -0
  31. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/__pycache__/serving_pooling.cpython-310.pyc +0 -0
  32. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/__pycache__/serving_rerank.cpython-310.pyc +0 -0
  33. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/__pycache__/serving_tokenization.cpython-310.pyc +0 -0
  34. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/api_server.py +991 -0
  35. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/cli_args.py +305 -0
  36. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/logits_processors.py +88 -0
  37. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/protocol.py +1593 -0
  38. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/reasoning_parsers/__init__.py +8 -0
  39. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/reasoning_parsers/__pycache__/__init__.cpython-310.pyc +0 -0
  40. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/reasoning_parsers/__pycache__/abs_reasoning_parsers.cpython-310.pyc +0 -0
  41. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/reasoning_parsers/__pycache__/deepseek_r1_reasoning_parser.cpython-310.pyc +0 -0
  42. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/reasoning_parsers/abs_reasoning_parsers.py +160 -0
  43. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/reasoning_parsers/deepseek_r1_reasoning_parser.py +147 -0
  44. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/run_batch.py +435 -0
  45. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_chat.py +961 -0
  46. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_completion.py +547 -0
  47. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_embedding.py +242 -0
  48. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_engine.py +527 -0
  49. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_models.py +244 -0
  50. infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_pooling.py +235 -0
.gitattributes CHANGED
@@ -1007,3 +1007,4 @@ infer_4_30_0/lib/python3.10/site-packages/scipy/stats/__pycache__/_distribution_
 infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_peak_finding_utils.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 infer_4_30_0/lib/python3.10/site-packages/scipy/signal/_upfirdn_apply.cpython-310-x86_64-linux-gnu.so filter=lfs diff=lfs merge=lfs -text
 infer_4_30_0/lib/python3.10/site-packages/pkg_resources/__pycache__/__init__.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
+infer_4_30_0/lib/python3.10/site-packages/matplotlib/__pycache__/colors.cpython-310.pyc filter=lfs diff=lfs merge=lfs -text
infer_4_30_0/lib/python3.10/site-packages/matplotlib/__pycache__/colors.cpython-310.pyc ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7d7bddc08eb8ee1873f8b72ed965cc6b1aeb31f25dc5a240b2f36013785513b9
+size 118850
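
The three added lines above are a Git LFS pointer, not the .pyc payload itself; only the oid and size of the real object are stored in the repository. A minimal sketch of reading such a pointer from a checkout (the helper below is illustrative and not part of vLLM or this commit):

# Minimal sketch: parse the oid and size out of a Git LFS pointer file.
# The path is the pointer added in this commit; adjust it to your checkout.
def read_lfs_pointer(path: str) -> dict:
    fields = {}
    with open(path, "r", encoding="utf-8") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            fields[key] = value
    return fields


pointer = read_lfs_pointer(
    "infer_4_30_0/lib/python3.10/site-packages/matplotlib/__pycache__/"
    "colors.cpython-310.pyc")
print(pointer["oid"])   # sha256:7d7bddc0...
print(pointer["size"])  # 118850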
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (169 Bytes).
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/__pycache__/async_timeout.cpython-310.pyc ADDED
Binary file (5.67 kB).
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/__pycache__/llm_engine.cpython-310.pyc ADDED
Binary file (50.1 kB).
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/__pycache__/metrics.cpython-310.pyc ADDED
Binary file (20.5 kB).
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/__pycache__/metrics_types.cpython-310.pyc ADDED
Binary file (3.9 kB).
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/__pycache__/protocol.cpython-310.pyc ADDED
Binary file (8.55 kB).
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/metrics_types.py ADDED
@@ -0,0 +1,102 @@
+# SPDX-License-Identifier: Apache-2.0
+"""
+These types are defined in this file to avoid importing vllm.engine.metrics
+and therefore importing prometheus_client.
+
+This is required due to usage of Prometheus multiprocess mode to enable
+metrics after splitting out the uvicorn process from the engine process.
+
+Prometheus multiprocess mode requires setting PROMETHEUS_MULTIPROC_DIR
+before prometheus_client is imported. Typically, this is done by setting
+the env variable before launch, but since we are a library, we need to
+do this in Python code and lazily import prometheus_client.
+"""
+
+import time
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from typing import Dict, List, Optional, Protocol
+
+from vllm.config import VllmConfig
+from vllm.spec_decode.metrics import SpecDecodeWorkerMetrics
+
+
+@dataclass
+class Stats:
+    """Created by LLMEngine for use by StatLogger."""
+    now: float
+
+    # System stats (should have _sys suffix)
+    # Scheduler State
+    num_running_sys: int
+    num_waiting_sys: int
+    num_swapped_sys: int
+    # KV Cache Usage in %
+    gpu_cache_usage_sys: float
+    cpu_cache_usage_sys: float
+    # Prefix caching block hit rate
+    cpu_prefix_cache_hit_rate: float
+    gpu_prefix_cache_hit_rate: float
+
+    # Iteration stats (should have _iter suffix)
+    num_prompt_tokens_iter: int
+    num_generation_tokens_iter: int
+    num_tokens_iter: int
+    time_to_first_tokens_iter: List[float]
+    time_per_output_tokens_iter: List[float]
+    num_preemption_iter: int
+
+    # Request stats (should have _requests suffix)
+    # Latency
+    time_e2e_requests: List[float]
+    time_queue_requests: List[float]
+    time_inference_requests: List[float]
+    time_prefill_requests: List[float]
+    time_decode_requests: List[float]
+    time_in_queue_requests: List[float]
+    model_forward_time_requests: List[float]
+    model_execute_time_requests: List[float]
+    # Metadata
+    num_prompt_tokens_requests: List[int]
+    num_generation_tokens_requests: List[int]
+    n_requests: List[int]
+    max_num_generation_tokens_requests: List[int]
+    max_tokens_requests: List[int]
+    finished_reason_requests: List[str]
+    waiting_lora_adapters: List[str]
+    running_lora_adapters: List[str]
+    max_lora: str
+
+    spec_decode_metrics: Optional["SpecDecodeWorkerMetrics"] = None
+
+
+class SupportsMetricsInfo(Protocol):
+
+    def metrics_info(self) -> Dict[str, str]:
+        ...
+
+
+class StatLoggerBase(ABC):
+    """Base class for StatLogger."""
+
+    def __init__(self, local_interval: float, vllm_config: VllmConfig) -> None:
+        # Tracked stats over current local logging interval.
+        self.num_prompt_tokens: List[int] = []
+        self.num_generation_tokens: List[int] = []
+        self.last_local_log = time.time()
+        self.local_interval = local_interval
+        self.spec_decode_metrics: Optional[SpecDecodeWorkerMetrics] = None
+
+    @abstractmethod
+    def log(self, stats: Stats) -> None:
+        raise NotImplementedError
+
+    @abstractmethod
+    def info(self, type: str, obj: SupportsMetricsInfo) -> None:
+        raise NotImplementedError
+
+    def maybe_update_spec_decode_metrics(self, stats: Stats):
+        """Save spec decode metrics (since they are unlikely
+        to be emitted at same time as log interval)."""
+        if stats.spec_decode_metrics is not None:
+            self.spec_decode_metrics = stats.spec_decode_metrics
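
StatLoggerBase above is the abstract hook that vllm.engine.metrics builds on. A minimal sketch of a custom logger against this interface (the class name and the choice of printed fields are illustrative, not part of the commit):

from vllm.engine.metrics_types import StatLoggerBase, Stats, SupportsMetricsInfo


# Minimal sketch of a concrete StatLogger: print a few system/iteration stats.
class PrintingStatLogger(StatLoggerBase):

    def log(self, stats: Stats) -> None:
        # Stash spec-decode metrics in case they arrive between log intervals.
        self.maybe_update_spec_decode_metrics(stats)
        print(f"running={stats.num_running_sys} "
              f"waiting={stats.num_waiting_sys} "
              f"gpu_kv_cache={stats.gpu_cache_usage_sys:.1%} "
              f"new_tokens={stats.num_generation_tokens_iter}")

    def info(self, type: str, obj: SupportsMetricsInfo) -> None:
        print(f"{type}: {obj.metrics_info()}")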
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/__init__.py ADDED
File without changes
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (186 Bytes).
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/__pycache__/interfaces.cpython-310.pyc ADDED
Binary file (3.08 kB).
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/__pycache__/multi_step.cpython-310.pyc ADDED
Binary file (6.87 kB).
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/__pycache__/single_step.cpython-310.pyc ADDED
Binary file (5.25 kB).
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/__pycache__/stop_checker.cpython-310.pyc ADDED
Binary file (3.34 kB).
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/__pycache__/util.cpython-310.pyc ADDED
Binary file (1.13 kB).
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/interfaces.py ADDED
@@ -0,0 +1,74 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from abc import ABC, abstractmethod
+from typing import Callable, List
+
+from vllm.config import SchedulerConfig
+from vllm.core.scheduler import Scheduler
+from vllm.engine.output_processor.stop_checker import StopChecker
+from vllm.sequence import Sequence, SequenceGroup, SequenceGroupOutput
+from vllm.transformers_utils.detokenizer import Detokenizer
+from vllm.transformers_utils.tokenizer import AnyTokenizer
+from vllm.utils import Counter
+
+
+class SequenceGroupOutputProcessor(ABC):
+    """Interface for logic that processes new token ids in sequence groups,
+    managing detokenization, stop checking, and freeing/forking sequences with
+    the scheduler.
+
+    This is highly coupled with the LLMEngine and should be seen as an extension
+    of it. The logic is separated to simplify the LLMEngine class and allow
+    separate implementations for single-step decoding (which supports beam
+    search sequence forking) and multi-step decoding (which does not support
+    beam search, but does support speculative decoding).
+    """
+
+    @staticmethod
+    def create_output_processor(
+        scheduler_config: SchedulerConfig,
+        detokenizer: Detokenizer,
+        scheduler: List[Scheduler],
+        seq_counter: Counter,
+        get_tokenizer_for_seq: Callable[[Sequence], AnyTokenizer],
+        stop_checker: "StopChecker",
+    ):
+        """Create an output processor.
+
+        This returns a single-step output processor if num_lookahead_slots is
+        zero, else returns a multi-step output processor.
+        """
+        if scheduler_config.num_lookahead_slots == 0:
+            # Importing here to avoid cycle.
+            from vllm.engine.output_processor.single_step import (
+                SingleStepOutputProcessor)
+            return SingleStepOutputProcessor(scheduler_config, detokenizer,
+                                             scheduler, seq_counter,
+                                             stop_checker)
+        else:
+            # Importing here to avoid cycle.
+            from vllm.engine.output_processor.multi_step import (
+                MultiStepOutputProcessor)
+            return MultiStepOutputProcessor(
+                detokenizer,
+                scheduler,
+                seq_counter,
+                get_tokenizer_for_seq,
+                stop_checker,
+            )
+
+    @abstractmethod
+    def process_outputs(self, sequence_group: SequenceGroup,
+                        outputs: List[SequenceGroupOutput],
+                        is_async: bool) -> None:
+        """Process new token ids for the sequence group. Handles logic such as
+        detokenization, stop checking, and freeing/forking sequences in the
+        scheduler.
+        """
+        pass
+
+    @abstractmethod
+    def process_prompt_logprob(self, seq_group: SequenceGroup,
+                               outputs: List[SequenceGroupOutput]) -> None:
+        """Update prompt logprobs received from outputs to seq_group."""
+        pass
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/multi_step.py ADDED
@@ -0,0 +1,205 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import functools
+from typing import Callable, List, cast
+
+from vllm.core.scheduler import Scheduler
+from vllm.engine.output_processor.interfaces import (
+    SequenceGroupOutputProcessor)
+from vllm.engine.output_processor.single_step import (
+    single_step_process_prompt_logprob)
+from vllm.engine.output_processor.stop_checker import StopChecker
+from vllm.logger import init_logger
+from vllm.sampling_params import SamplingParams
+from vllm.sequence import (VLLM_INVALID_TOKEN_ID,
+                           CompletionSequenceGroupOutput, Sequence,
+                           SequenceGroup, SequenceGroupOutput, SequenceOutput,
+                           SequenceStatus)
+from vllm.transformers_utils.detokenizer import Detokenizer
+from vllm.transformers_utils.tokenizer import AnyTokenizer
+from vllm.utils import Counter
+
+logger = init_logger(__name__)
+
+
+class MultiStepOutputProcessor(SequenceGroupOutputProcessor):
+    """SequenceGroupOutputProcessor which handles logic related to
+    detokenization and stopping conditions. It specializes to "multi-step
+    decoding", where vLLM's worker may generate multiple tokens per invocation.
+    This is currently mutually exclusive with advanced sampling techniques like
+    beam search, which motivates the separation of this logic from the single
+    step output processor.
+
+    This class is responsible for things such as correctly appending all new
+    token ids to their sequence, detokenizing new token ids, truncating new
+    output tokens after an eos token, and correctly handling the case where the
+    number of new output tokens per sequence differs in a single batch.
+    """
+
+    def __init__(
+        self,
+        detokenizer: Detokenizer,
+        scheduler: List[Scheduler],
+        seq_counter: Counter,
+        get_tokenizer_for_seq: Callable[[Sequence], AnyTokenizer],
+        stop_checker: StopChecker,
+    ):
+        self.detokenizer = detokenizer
+        self.scheduler = scheduler
+        self.seq_counter = seq_counter
+        self.get_tokenizer_for_seq = get_tokenizer_for_seq
+        self.stop_checker = stop_checker
+
+    def process_prompt_logprob(self, seq_group: SequenceGroup,
+                               outputs: List[SequenceGroupOutput]) -> None:
+        """Process prompt logprobs associated with each step of a multi-step-
+        scheduled computation.
+
+        Args:
+            seq_group: the outputs are associated with this :class:`SequenceGroup`
+            outputs: the :class:`SequenceGroupOutput`s for all scheduler steps
+        """
+        for output in outputs:
+            # Concatenate single-step prompt logprob processing results.
+            assert isinstance(output, CompletionSequenceGroupOutput)
+            single_step_process_prompt_logprob(self, seq_group, output)
+
+    @staticmethod
+    @functools.lru_cache
+    def _log_prompt_logprob_unsupported_warning_once():
+        # Reminder: Please update docs/source/features/compatibility_matrix.md
+        # If the feature combo become valid
+        logger.warning(
+            "Prompt logprob is not supported by multi step workers. "
+            "(e.g., speculative decode uses multi step workers).")
+
+    def process_outputs(self,
+                        sequence_group: SequenceGroup,
+                        outputs: List[SequenceGroupOutput],
+                        is_async: bool = False) -> None:
+        """Append new tokens in the outputs to sequences in the sequence group.
+
+        This only supports sequence groups of size 1. It supports greater than
+        one new token per sequence.
+
+        This applies logic like stop condition checking and detokenization.
+        It also handles cases where there are tokens emitted after
+        the EOS token.
+
+        is_async - Indicates whether this postprocessor runs in
+            parallel with the GPU forward pass and is processing
+            tokens from the previous step. If this is true, then
+            no tokens need to be appended since it is already done
+            externally (before the next schedule() call)
+        """
+        # Sequences can be in RUNNING or FINISHED_ABORTED state
+        # once scheduled, as a sequence is moved to FINSIHED_ABORTED
+        # if a client disconnects from the api server.
+        seqs = sequence_group.get_seqs(status=SequenceStatus.RUNNING)
+        if seqs is None:
+            seqs = sequence_group.get_seqs(
+                status=SequenceStatus.FINISHED_ABORTED)
+
+        assert seqs, "Expected RUNNING or FINISHED_ABORTED sequences"
+        assert len(seqs) == 1, (
+            "Beam search not supported in multi-step decoding.")
+        seq = seqs[0]
+        seq_id = seq.seq_id
+        # This method is defined in the more generic
+        # SequenceGroupOutputProcessor, but here we assume that the outputs are
+        # of a more specific type.
+        assert all([
+            isinstance(output, CompletionSequenceGroupOutput)
+            for output in outputs
+        ])
+        compl_outputs = cast(List[CompletionSequenceGroupOutput], outputs)
+        assert all([
+            seq_id == output.samples[0].parent_seq_id
+            for output in compl_outputs
+        ])
+
+        if is_async:
+            # Async case: We process tokens one by one. Here, we know the token
+            # was already appended, so we only need to do the rest of the
+            # postprocessor: Detokenization + stopping logic
+            self._process_decode_and_stop(seq, sequence_group.sampling_params)
+        else:
+            # Standard multi-step case
+
+            # Since there's only one sequence per sequence group,
+            # we can take the first sample.
+            samples = [output.samples[0] for output in compl_outputs]
+
+            # entries in sample tokens may be invalid (eg. due to spec decode
+            # rejecting tokens).
+            valid_samples = [
+                sample for sample in samples
+                if sample.output_token != VLLM_INVALID_TOKEN_ID
+            ]
+
+            # When both spec-decode and pre-fill chunking are enabled, we
+            # don't have guaranteed samples here (e.g. all -1s).
+            if valid_samples:
+                self._process_seq_outputs(seq, valid_samples,
+                                          sequence_group.sampling_params)
+
+    def _process_decode_and_stop(self, seq: Sequence,
+                                 sampling_params: SamplingParams) -> None:
+        new_char_count = 0
+        if sampling_params.detokenize and self.detokenizer:
+            new_char_count = self.detokenizer.decode_sequence_inplace(
+                seq, sampling_params)
+
+        # TODO(sang): Support lora.
+        self.stop_checker.maybe_stop_sequence(
+            seq,
+            new_char_count=new_char_count,
+            sampling_params=sampling_params,
+        )
+
+    def _process_seq_outputs(self, seq: Sequence,
+                             valid_samples: List[SequenceOutput],
+                             sampling_params: SamplingParams) -> None:
+        output_token_ids = [sample.output_token for sample in valid_samples]
+        output_logprobs = [sample.logprobs for sample in valid_samples]
+
+        # Truncate to max_tokens if necessary.
+        remaining_tokens = sampling_params.max_tokens - (seq.get_output_len() +
+                                                         len(output_token_ids))
+        if remaining_tokens < 0:
+            output_token_ids = output_token_ids[:remaining_tokens]
+
+        # Truncate any tokens after EOS. This is required as spec decode
+        # generates a fixed number of tokens without evaluating stopping
+        # conditions within the block. This can cause an eos token to be
+        # unintentionally ignored.
+        if not sampling_params.ignore_eos:
+            eos_token_id = self.get_tokenizer_for_seq(seq).eos_token_id
+            # Avoiding .index calls as exception throwing in the happy path
+            # is expensive.
+            for i in range(len(output_token_ids)):
+                if output_token_ids[i] == eos_token_id:
+                    output_token_ids = output_token_ids[:i + 1]
+                    break
+
+        is_prefill_sampled_token = seq.data.get_num_uncomputed_tokens() == 0
+        # Incrementally append tokens to the sequence, as if we had only one new
+        # token.
+        for output_token_id, output_logprob in zip(output_token_ids,
+                                                   output_logprobs):
+            seq.append_token_id(
+                token_id=output_token_id,
+                logprobs=output_logprob,
+            )
+
+            if is_prefill_sampled_token:
+                is_prefill_sampled_token = False
+            else:
+                # Update num_computed_tokens iff the sampled token is not from
+                # a prefill step.
+                seq.data.update_num_computed_tokens(1)
+
+            self._process_decode_and_stop(seq, sampling_params)
+
+            if seq.is_finished():
+                break
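
The truncation rules in _process_seq_outputs above (cap the block at max_tokens, then cut everything after the first EOS unless ignore_eos is set) can be seen on plain token lists. The standalone sketch below mirrors that logic; it is an illustration, not code from the commit:

from typing import List


# Standalone illustration of the truncation done in _process_seq_outputs.
def truncate_new_tokens(new_token_ids: List[int], current_output_len: int,
                        max_tokens: int, eos_token_id: int,
                        ignore_eos: bool = False) -> List[int]:
    # Cap the total output length at max_tokens.
    remaining = max_tokens - (current_output_len + len(new_token_ids))
    if remaining < 0:
        new_token_ids = new_token_ids[:remaining]
    # Drop everything after the first EOS (spec decode may sample past it).
    if not ignore_eos:
        for i, token_id in enumerate(new_token_ids):
            if token_id == eos_token_id:
                return new_token_ids[:i + 1]
    return new_token_ids


# E.g. 3 tokens already emitted, max_tokens=6, EOS id 2: the block [7, 2, 9, 4]
# is first capped to [7, 2, 9] and then cut after the EOS to [7, 2].
print(truncate_new_tokens([7, 2, 9, 4], 3, 6, eos_token_id=2))  # [7, 2]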
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/single_step.py ADDED
@@ -0,0 +1,136 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from typing import List
+
+from vllm.config import SchedulerConfig
+from vllm.core.scheduler import Scheduler
+from vllm.engine.output_processor.interfaces import (
+    SequenceGroupOutputProcessor)
+from vllm.engine.output_processor.stop_checker import StopChecker
+from vllm.logger import init_logger
+from vllm.sequence import (CompletionSequenceGroupOutput, SequenceGroup,
+                           SequenceGroupOutput)
+from vllm.transformers_utils.detokenizer import Detokenizer
+from vllm.utils import Counter
+
+logger = init_logger(__name__)
+
+
+def single_step_process_prompt_logprob(
+        sg_output_proc: SequenceGroupOutputProcessor, seq_group: SequenceGroup,
+        output: CompletionSequenceGroupOutput) -> None:
+    """Process prompt logprobs associated with the :class:`SequenceGroupOutput`
+    for a given step.
+
+    Do nothing if the output has no prompt logprobs.
+
+    Account for the fact that transformers do not compute first-token logprobs.
+
+    Args:
+      sg_output_proc: :class:`SequenceGroupOutputProcessor` instance
+      seq_group: the output is associated with this :class:`SequenceGroup`
+      output: the :class:`SequenceGroupOutput` for a single scheduler step
+    """
+    prompt_logprobs = output.prompt_logprobs
+
+    # If this is the first (or only) "chunk" of the prefill, we need
+    # to prepend None to the list of prompt logprobs. The reason for this
+    # is that for N prompt tokens, the Sampler will generate N-1 total
+    # prompt logprobs during prefill since the token at idx 0 will not
+    # have a logprob associated with it.
+    if prompt_logprobs is not None:
+        if not seq_group.prompt_logprobs:
+            prompt_logprobs = [None] + prompt_logprobs
+            seq_group.prompt_logprobs = []
+
+        assert hasattr(sg_output_proc, 'detokenizer')
+        if (seq_group.sampling_params.detokenize
+                and sg_output_proc.detokenizer):
+            sg_output_proc.detokenizer.decode_prompt_logprobs_inplace(
+                seq_group,
+                prompt_logprobs,
+                position_offset=len(seq_group.prompt_logprobs))
+
+        seq_group.prompt_logprobs.extend(prompt_logprobs)
+
+
+class SingleStepOutputProcessor(SequenceGroupOutputProcessor):
+    """SequenceGroupOutputProcessor which handles "output processing" logic,
+    which happens after the model returns generated token ids and before
+    scheduling of the next batch. Output processing logic includes
+    detokenization, and determining if a sequence is finished (e.g. via max len
+    or eos token).
+
+    The SingleStepOutputProcessor is specialized to the case where the model
+    emits at most a single token per invocation, which precludes configurations
+    such as speculative decoding or multi-step decoding. This enables beam
+    search sampling, which requires forking/finishing/freeing sequences in a way
+    that is currently difficult to schedule multiple steps ahead of time.
+    """
+
+    def __init__(self, scheduler_config: SchedulerConfig,
+                 detokenizer: Detokenizer, scheduler: List[Scheduler],
+                 seq_counter: Counter, stop_checker: StopChecker):
+        self.scheduler_config = scheduler_config
+        self.detokenizer = detokenizer
+        self.scheduler = scheduler
+        self.seq_counter = seq_counter
+        self.stop_checker = stop_checker
+
+    def process_outputs(self, sequence_group: SequenceGroup,
+                        outputs: List[SequenceGroupOutput],
+                        is_async: bool) -> None:
+        """Append all new tokens to sequences in the sequence group. Fork any
+        surviving beam candidates; free any unsurviving ones.
+
+        Invokes detokenizer to detokenize new tokens, and also marks sequences
+        as finished if they meet stop conditions.
+
+        is_async - Indicates whether this postprocessor runs in
+            parallel with the GPU forward pass and is processing
+            tokens from the previous step. If this is true, then
+            no tokens need to be appended since it is already done
+            externally (before the next schedule() call)
+        """
+        assert (len(outputs) == 1
+                ), f"{type(self)} does not support multiple outputs per step"
+        return self._process_sequence_group_outputs(sequence_group, outputs[0],
+                                                    is_async)
+
+    def process_prompt_logprob(self, seq_group: SequenceGroup,
+                               outputs: List[SequenceGroupOutput]) -> None:
+        """Process prompt logprobs associated with one step of a single-step-
+        scheduled computation.
+
+        Args:
+          seq_group: the output is associated with this :class:`SequenceGroup`
+          outputs: the :class:`SequenceGroupOutput` for a single scheduler step
+        """
+        assert len(outputs) == 1, "Single step should only have 1 output."
+        output = outputs[0]
+        assert isinstance(output, CompletionSequenceGroupOutput)
+        single_step_process_prompt_logprob(self, seq_group, output)
+
+    def _process_sequence_group_outputs(self, seq_group: SequenceGroup,
+                                        outputs: SequenceGroupOutput,
+                                        is_async: bool) -> None:
+        sampling_params = seq_group.sampling_params
+
+        sample = outputs.samples[0]
+        seq = seq_group.first_seq
+        if not is_async:
+            seq.append_token_id(sample.output_token, sample.logprobs)
+        if sampling_params.detokenize and self.detokenizer:
+            new_char_count = self.detokenizer.decode_sequence_inplace(
+                seq, sampling_params)
+        else:
+            new_char_count = 0
+        self.stop_checker.maybe_stop_sequence(
+            seq,
+            new_char_count,
+            sampling_params,
+            lora_req=seq_group.lora_request,
+        )
+        if seq.is_finished():
+            for scheduler in self.scheduler:
+                scheduler.free_seq(seq)
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/stop_checker.py ADDED
@@ -0,0 +1,130 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from typing import Callable, List, Optional, Tuple
+
+from vllm.lora.request import LoRARequest
+from vllm.sampling_params import SamplingParams
+from vllm.sequence import Sequence, SequenceStatus
+from vllm.transformers_utils.tokenizer import AnyTokenizer
+
+
+class StopChecker:
+    """LLMEngine helper class which separates out the logic involving stop
+    checking. This checks things such as: whether the eos token was emitted,
+    whether the max_tokens has been consumed, whether a stop string has been
+    emitted, or if we have exceeded the max model len.
+    """
+
+    def __init__(self, max_model_len: int,
+                 get_tokenizer_for_seq: Callable[[Sequence], AnyTokenizer]):
+        # Do not use it directly, but use `self._get_max_model_len`.
+        self._max_model_len = max_model_len
+        self.get_tokenizer_for_seq = get_tokenizer_for_seq
+
+    def _get_max_model_len(self, lora_req: Optional[LoRARequest]):
+        if lora_req and lora_req.long_lora_max_len:
+            return lora_req.long_lora_max_len
+        else:
+            return self._max_model_len
+
+    def maybe_stop_sequence(
+        self,
+        seq: Sequence,
+        new_char_count: int,
+        sampling_params: SamplingParams,
+        lora_req: Optional[LoRARequest] = None,
+    ) -> None:
+        """Stop the finished sequences.
+
+        new_char_count is the number of chars added to the
+        sequence's output text for the newly generated token
+        """
+
+        # Check if the minimum number of tokens has been generated yet;
+        # skip the stop string/token checks if not
+        if seq.get_output_len() < sampling_params.min_tokens:
+            return
+
+        # Check if the sequence has generated the EOS token.
+        if ((not sampling_params.ignore_eos)
+                and seq.get_last_token_id() == seq.eos_token_id):
+            # Remove the last EOS token unless explicitly specified
+            # This prevents unintended exposure of the EOS token
+            if new_char_count and (
+                    not sampling_params.include_stop_str_in_output):
+                seq.output_text = seq.output_text[:-new_char_count]
+            seq.status = SequenceStatus.FINISHED_STOPPED
+            return
+
+        # Check if a stop token was encountered.
+        # This assumes a single token produced per step.
+        last_token_id = seq.get_last_token_id()
+        if last_token_id in (sampling_params.stop_token_ids or ()):
+            if new_char_count and (
+                    not sampling_params.include_stop_str_in_output):
+                # Remove last token
+                seq.output_text = seq.output_text[:-new_char_count]
+            seq.status = SequenceStatus.FINISHED_STOPPED
+            seq.stop_reason = last_token_id
+            return
+
+        # Check if any stop strings are matched.
+        stop = self.check_stop_strings(
+            seq.output_text, new_char_count, sampling_params.stop,
+            sampling_params.include_stop_str_in_output)
+        if stop is not None:
+            stop_str, truncate_to = stop
+            if truncate_to != -1:
+                seq.output_text = seq.output_text[:truncate_to]
+            seq.status = SequenceStatus.FINISHED_STOPPED
+            seq.stop_reason = stop_str
+            return
+
+        # Check if the sequence has reached max_model_len.
+        if seq.get_len() > self._get_max_model_len(lora_req):
+            seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED
+            return
+
+        # Check if the sequence has reached max_tokens.
+        if seq.get_output_len() == sampling_params.max_tokens:
+            seq.status = SequenceStatus.FINISHED_LENGTH_CAPPED
+            return
+
+    @staticmethod
+    def check_stop_strings(
+        output_text: str,
+        new_char_count: int,
+        stop: List[str],
+        include_in_output: bool,
+    ) -> Optional[Tuple[str, int]]:
+        """Check if any stop strings are matched and truncate sequence
+        output text accordingly.
+
+        Returns tuple (stop_string, offset) if matched or else None.
+
+        Where stop_string is the matched stop string and offset is the
+        length to which output_text should be truncated, or -1 for no
+        truncation.
+        """
+        if not new_char_count or not stop:
+            return None
+
+        for stop_str in stop:
+            stop_string_len = len(stop_str)
+            # Avoid searching already-searched text.
+            stop_index = output_text.find(stop_str,
+                                          1 - new_char_count - stop_string_len)
+            if stop_index == -1:
+                continue
+
+            if include_in_output:
+                # Truncate to end of stop string.
+                stop_index += stop_string_len
+                if stop_index >= len(output_text):
+                    # No truncation required.
+                    return stop_str, -1
+
+            # Truncate the output text to either the beginning
+            # or end of the stop string.
+            return stop_str, stop_index
+        return None
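
check_stop_strings above is a pure @staticmethod, so its behaviour can be probed in isolation. A small worked example, assuming vllm is importable and using made-up strings:

from vllm.engine.output_processor.stop_checker import StopChecker

# Suppose the newly decoded token appended the 7 characters of "<|end|>".
text = "Hello world<|end|>"
new_char_count = len("<|end|>")

# With include_in_output=False the match reports the offset of the stop
# string, so the caller truncates the visible text to "Hello world".
print(StopChecker.check_stop_strings(text, new_char_count, ["<|end|>"],
                                     include_in_output=False))
# -> ('<|end|>', 11)

# With include_in_output=True the stop string ends exactly at the end of the
# text, so no truncation is needed and the reported offset is -1.
print(StopChecker.check_stop_strings(text, new_char_count, ["<|end|>"],
                                     include_in_output=True))
# -> ('<|end|>', -1)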
infer_4_30_0/lib/python3.10/site-packages/vllm/engine/output_processor/util.py ADDED
@@ -0,0 +1,27 @@
+# SPDX-License-Identifier: Apache-2.0
+
+from typing import List
+from typing import Sequence as GenericSequence
+from typing import cast
+
+from vllm.model_executor.layers.sampler import SamplerOutput
+from vllm.sequence import CompletionSequenceGroupOutput, SequenceGroupOutput
+
+
+def create_output_by_sequence_group(
+        outputs: GenericSequence[SamplerOutput],
+        num_seq_groups: int) -> List[List[SequenceGroupOutput]]:
+    """Helper method which transforms a 2d list organized by
+    [step][sequence group] into [sequence group][step].
+    """
+    output_by_sequence_group: List[List[CompletionSequenceGroupOutput]] = [
+        [] for _ in range(num_seq_groups)
+    ]
+    for step in outputs:
+        sequence_group_output: CompletionSequenceGroupOutput
+        for i, sequence_group_output in enumerate(step):
+            output_by_sequence_group[i].append(sequence_group_output)
+
+    # Cast to the more generic type that CompletionSequenceGroupOutput
+    # inherits from.
+    return cast(List[List[SequenceGroupOutput]], output_by_sequence_group)
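
create_output_by_sequence_group only transposes the nesting of its input. The same reshaping on plain lists, as a conceptual illustration independent of the SamplerOutput objects the real helper receives:

# Conceptual illustration: the engine collects outputs as
# [step][sequence_group]; downstream code wants [sequence_group][step].
steps = [
    ["g0_step0", "g1_step0"],  # step 0: one output per sequence group
    ["g0_step1", "g1_step1"],  # step 1
]
num_seq_groups = 2

by_group = [[] for _ in range(num_seq_groups)]
for step in steps:
    for i, group_output in enumerate(step):
        by_group[i].append(group_output)

print(by_group)  # [['g0_step0', 'g0_step1'], ['g1_step0', 'g1_step1']]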
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/__init__.py ADDED
File without changes
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/api_server.py ADDED
@@ -0,0 +1,170 @@
+# SPDX-License-Identifier: Apache-2.0
+"""
+NOTE: This API server is used only for demonstrating usage of AsyncEngine
+and simple performance benchmarks. It is not intended for production use.
+For production use, we recommend using our OpenAI compatible server.
+We are also not going to accept PRs modifying this file, please
+change `vllm/entrypoints/openai/api_server.py` instead.
+"""
+import asyncio
+import json
+import ssl
+from argparse import Namespace
+from typing import Any, AsyncGenerator, Optional
+
+from fastapi import FastAPI, Request
+from fastapi.responses import JSONResponse, Response, StreamingResponse
+
+from vllm.engine.arg_utils import AsyncEngineArgs
+from vllm.engine.async_llm_engine import AsyncLLMEngine
+from vllm.entrypoints.launcher import serve_http
+from vllm.entrypoints.utils import with_cancellation
+from vllm.logger import init_logger
+from vllm.sampling_params import SamplingParams
+from vllm.usage.usage_lib import UsageContext
+from vllm.utils import FlexibleArgumentParser, random_uuid, set_ulimit
+from vllm.version import __version__ as VLLM_VERSION
+
+logger = init_logger("vllm.entrypoints.api_server")
+
+TIMEOUT_KEEP_ALIVE = 5  # seconds.
+app = FastAPI()
+engine = None
+
+
+@app.get("/health")
+async def health() -> Response:
+    """Health check."""
+    return Response(status_code=200)
+
+
+@app.post("/generate")
+async def generate(request: Request) -> Response:
+    """Generate completion for the request.
+
+    The request should be a JSON object with the following fields:
+    - prompt: the prompt to use for the generation.
+    - stream: whether to stream the results or not.
+    - other fields: the sampling parameters (See `SamplingParams` for details).
+    """
+    request_dict = await request.json()
+    return await _generate(request_dict, raw_request=request)
+
+
+@with_cancellation
+async def _generate(request_dict: dict, raw_request: Request) -> Response:
+    prompt = request_dict.pop("prompt")
+    stream = request_dict.pop("stream", False)
+    sampling_params = SamplingParams(**request_dict)
+    request_id = random_uuid()
+
+    assert engine is not None
+    results_generator = engine.generate(prompt, sampling_params, request_id)
+
+    # Streaming case
+    async def stream_results() -> AsyncGenerator[bytes, None]:
+        async for request_output in results_generator:
+            prompt = request_output.prompt
+            assert prompt is not None
+            text_outputs = [
+                prompt + output.text for output in request_output.outputs
+            ]
+            ret = {"text": text_outputs}
+            yield (json.dumps(ret) + "\n").encode("utf-8")
+
+    if stream:
+        return StreamingResponse(stream_results())
+
+    # Non-streaming case
+    final_output = None
+    try:
+        async for request_output in results_generator:
+            final_output = request_output
+    except asyncio.CancelledError:
+        return Response(status_code=499)
+
+    assert final_output is not None
+    prompt = final_output.prompt
+    assert prompt is not None
+    text_outputs = [prompt + output.text for output in final_output.outputs]
+    ret = {"text": text_outputs}
+    return JSONResponse(ret)
+
+
+def build_app(args: Namespace) -> FastAPI:
+    global app
+
+    app.root_path = args.root_path
+    return app
+
+
+async def init_app(
+    args: Namespace,
+    llm_engine: Optional[AsyncLLMEngine] = None,
+) -> FastAPI:
+    app = build_app(args)
+
+    global engine
+
+    engine_args = AsyncEngineArgs.from_cli_args(args)
+    engine = (llm_engine
+              if llm_engine is not None else AsyncLLMEngine.from_engine_args(
+                  engine_args, usage_context=UsageContext.API_SERVER))
+
+    return app
+
+
+async def run_server(args: Namespace,
+                     llm_engine: Optional[AsyncLLMEngine] = None,
+                     **uvicorn_kwargs: Any) -> None:
+    logger.info("vLLM API server version %s", VLLM_VERSION)
+    logger.info("args: %s", args)
+
+    set_ulimit()
+
+    app = await init_app(args, llm_engine)
+    assert engine is not None
+
+    shutdown_task = await serve_http(
+        app,
+        sock=None,
+        host=args.host,
+        port=args.port,
+        log_level=args.log_level,
+        timeout_keep_alive=TIMEOUT_KEEP_ALIVE,
+        ssl_keyfile=args.ssl_keyfile,
+        ssl_certfile=args.ssl_certfile,
+        ssl_ca_certs=args.ssl_ca_certs,
+        ssl_cert_reqs=args.ssl_cert_reqs,
+        **uvicorn_kwargs,
+    )
+
+    await shutdown_task
+
+
+if __name__ == "__main__":
+    parser = FlexibleArgumentParser()
+    parser.add_argument("--host", type=str, default=None)
+    parser.add_argument("--port", type=int, default=8000, ge=1024, le=65535)
+    parser.add_argument("--ssl-keyfile", type=str, default=None)
+    parser.add_argument("--ssl-certfile", type=str, default=None)
+    parser.add_argument("--ssl-ca-certs",
+                        type=str,
+                        default=None,
+                        help="The CA certificates file")
+    parser.add_argument(
+        "--ssl-cert-reqs",
+        type=int,
+        default=int(ssl.CERT_NONE),
+        help="Whether client certificate is required (see stdlib ssl module's)"
+    )
+    parser.add_argument(
+        "--root-path",
+        type=str,
+        default=None,
+        help="FastAPI root_path when app is behind a path based routing proxy")
+    parser.add_argument("--log-level", type=str, default="debug")
+    parser = AsyncEngineArgs.add_cli_args(parser)
+    args = parser.parse_args()
+
+    asyncio.run(run_server(args))
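
With this demo server running (for example via python -m vllm.entrypoints.api_server together with the usual engine arguments), the /generate route above takes a prompt, an optional stream flag, and any remaining SamplingParams fields. A client-side sketch; the host, port, and chosen sampling values are assumptions:

import json

import requests  # assumption: requests is installed; any HTTP client works

# Non-streaming request against the demo server assumed on localhost:8000.
resp = requests.post(
    "http://localhost:8000/generate",
    json={
        "prompt": "San Francisco is a",
        "stream": False,
        # Remaining fields are forwarded to SamplingParams(**request_dict).
        "max_tokens": 32,
        "temperature": 0.8,
    },
)
print(resp.json()["text"])  # list of prompt + completion strings

# Streaming: each emitted line is a JSON object of the form {"text": [...]}.
with requests.post("http://localhost:8000/generate",
                   json={"prompt": "San Francisco is a", "stream": True},
                   stream=True) as stream_resp:
    for line in stream_resp.iter_lines():
        if line:
            print(json.loads(line)["text"])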
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/chat_utils.py ADDED
@@ -0,0 +1,1007 @@
+# SPDX-License-Identifier: Apache-2.0
+
+import asyncio
+import codecs
+import json
+from abc import ABC, abstractmethod
+from collections import defaultdict, deque
+from functools import cache, lru_cache, partial
+from pathlib import Path
+from typing import (Any, Awaitable, Callable, Dict, Generic, Iterable, List,
+                    Literal, Optional, Tuple, TypeVar, Union, cast)
+
+import jinja2.nodes
+import transformers.utils.chat_template_utils as hf_chat_utils
+# yapf conflicts with isort for this block
+# yapf: disable
+from openai.types.chat import (ChatCompletionAssistantMessageParam,
+                               ChatCompletionContentPartImageParam,
+                               ChatCompletionContentPartInputAudioParam)
+from openai.types.chat import (
+    ChatCompletionContentPartParam as OpenAIChatCompletionContentPartParam)
+from openai.types.chat import (ChatCompletionContentPartRefusalParam,
+                               ChatCompletionContentPartTextParam)
+from openai.types.chat import (
+    ChatCompletionMessageParam as OpenAIChatCompletionMessageParam)
+from openai.types.chat import (ChatCompletionMessageToolCallParam,
+                               ChatCompletionToolMessageParam)
+from openai.types.chat.chat_completion_content_part_input_audio_param import (
+    InputAudio)
+# yapf: enable
+# pydantic needs the TypedDict from typing_extensions
+from transformers import PreTrainedTokenizer, PreTrainedTokenizerFast
+from typing_extensions import Required, TypeAlias, TypedDict
+
+from vllm.config import ModelConfig
+from vllm.logger import init_logger
+from vllm.multimodal import MultiModalDataDict
+from vllm.multimodal.utils import MediaConnector
+from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer
+
+logger = init_logger(__name__)
+
+
+class AudioURL(TypedDict, total=False):
+    url: Required[str]
+    """
+    Either a URL of the audio or a data URL with base64 encoded audio data.
+    """
+
+
+class ChatCompletionContentPartAudioParam(TypedDict, total=False):
+    audio_url: Required[AudioURL]
+
+    type: Required[Literal["audio_url"]]
+    """The type of the content part."""
+
+
+class VideoURL(TypedDict, total=False):
+    url: Required[str]
+    """
+    Either a URL of the video or a data URL with base64 encoded video data.
+    """
+
+
+class ChatCompletionContentPartVideoParam(TypedDict, total=False):
+    video_url: Required[VideoURL]
+
+    type: Required[Literal["video_url"]]
+    """The type of the content part."""
+
+
+class CustomChatCompletionContentSimpleImageParam(TypedDict, total=False):
+    """A simpler version of the param that only accepts a plain image_url.
+    This is supported by OpenAI API, although it is not documented.
+
+    Example:
+    {
+        "image_url": "https://example.com/image.jpg"
+    }
+    """
+    image_url: Required[str]
+
+
+class CustomChatCompletionContentSimpleAudioParam(TypedDict, total=False):
+    """A simpler version of the param that only accepts a plain audio_url.
+
+    Example:
+    {
+        "audio_url": "https://example.com/audio.mp3"
+    }
+    """
+    audio_url: Required[str]
+
+
+class CustomChatCompletionContentSimpleVideoParam(TypedDict, total=False):
+    """A simpler version of the param that only accepts a plain audio_url.
+
+    Example:
+    {
+        "video_url": "https://example.com/video.mp4"
+    }
+    """
+    video_url: Required[str]
+
+
+ChatCompletionContentPartParam: TypeAlias = Union[
+    OpenAIChatCompletionContentPartParam, ChatCompletionContentPartAudioParam,
+    ChatCompletionContentPartInputAudioParam,
+    ChatCompletionContentPartVideoParam, ChatCompletionContentPartRefusalParam,
+    CustomChatCompletionContentSimpleImageParam,
+    CustomChatCompletionContentSimpleAudioParam,
+    CustomChatCompletionContentSimpleVideoParam, str]
+
+
+class CustomChatCompletionMessageParam(TypedDict, total=False):
+    """Enables custom roles in the Chat Completion API."""
+    role: Required[str]
+    """The role of the message's author."""
+
+    content: Union[str, List[ChatCompletionContentPartParam]]
+    """The contents of the message."""
+
+    name: str
+    """An optional name for the participant.
+
+    Provides the model information to differentiate between participants of the
+    same role.
+    """
+
+    tool_call_id: Optional[str]
+    """Tool call that this message is responding to."""
+
+    tool_calls: Optional[Iterable[ChatCompletionMessageToolCallParam]]
+    """The tool calls generated by the model, such as function calls."""
+
+
+ChatCompletionMessageParam = Union[OpenAIChatCompletionMessageParam,
+                                   CustomChatCompletionMessageParam]
+
+
+# TODO: Make fields ReadOnly once mypy supports it
+class ConversationMessage(TypedDict, total=False):
+    role: Required[str]
+    """The role of the message's author."""
+
+    content: Union[Optional[str], List[Dict[str, str]]]
+    """The contents of the message"""
+
+    tool_call_id: Optional[str]
+    """Tool call that this message is responding to."""
+
+    name: Optional[str]
+    """The name of the function to call"""
+
+    tool_calls: Optional[Iterable[ChatCompletionMessageToolCallParam]]
+    """The tool calls generated by the model, such as function calls."""
+
+
+# Passed in by user
+ChatTemplateContentFormatOption = Literal["auto", "string", "openai"]
+
+# Used internally
+_ChatTemplateContentFormat = Literal["string", "openai"]
+
+
+def _is_var_access(node: jinja2.nodes.Node, varname: str) -> bool:
+    if isinstance(node, jinja2.nodes.Name):
+        return node.ctx == "load" and node.name == varname
+
+    return False
+
+
+def _is_attr_access(node: jinja2.nodes.Node, varname: str, key: str) -> bool:
+    if isinstance(node, jinja2.nodes.Getitem):
+        return (_is_var_access(node.node, varname)
+                and isinstance(node.arg, jinja2.nodes.Const)
+                and node.arg.value == key)
+
+    if isinstance(node, jinja2.nodes.Getattr):
+        return _is_var_access(node.node, varname) and node.attr == key
+
+    return False
+
+
+def _is_var_or_elems_access(
+    node: jinja2.nodes.Node,
+    varname: str,
+    key: Optional[str] = None,
+) -> bool:
+    if isinstance(node, jinja2.nodes.Filter):
+        return (node.node is not None
+                and _is_var_or_elems_access(node.node, varname, key))
+    if isinstance(node, jinja2.nodes.Test):
+        return _is_var_or_elems_access(node.node, varname, key)
+
+    if (isinstance(node, jinja2.nodes.Getitem)
+            and isinstance(node.arg, jinja2.nodes.Slice)):
+        return _is_var_or_elems_access(node.node, varname, key)
+
+    # yapf: disable
+    return (
+        _is_attr_access(node, varname, key) if key
+        else _is_var_access(node, varname)
+    )  # yapf: enable
+
+
+def _iter_nodes_assign_var_or_elems(root: jinja2.nodes.Node, varname: str):
+    # Global variable that is implicitly defined at the root
+    yield root, varname
+
+    # Iterative BFS
+    related_varnames = deque([varname])
+    while related_varnames:
+        related_varname = related_varnames.popleft()
+
+        for assign_ast in root.find_all(jinja2.nodes.Assign):
+            lhs = assign_ast.target
+            rhs = assign_ast.node
+
+            if _is_var_or_elems_access(rhs, related_varname):
+                assert isinstance(lhs, jinja2.nodes.Name)
+                yield assign_ast, lhs.name
+
+                # Avoid infinite looping for self-assignment
+                if lhs.name != related_varname:
+                    related_varnames.append(lhs.name)
+
+
+# NOTE: The proper way to handle this is to build a CFG so that we can handle
+# the scope in which each variable is defined, but that is too complicated
+def _iter_nodes_assign_messages_item(root: jinja2.nodes.Node):
+    messages_varnames = [
+        varname
+        for _, varname in _iter_nodes_assign_var_or_elems(root, "messages")
+    ]
+
+    # Search for {%- for message in messages -%} loops
+    for loop_ast in root.find_all(jinja2.nodes.For):
+        loop_iter = loop_ast.iter
+        loop_target = loop_ast.target
+
+        for varname in messages_varnames:
+            if _is_var_or_elems_access(loop_iter, varname):
+                assert isinstance(loop_target, jinja2.nodes.Name)
+                yield loop_ast, loop_target.name
+                break
+
+
+def _iter_nodes_assign_content_item(root: jinja2.nodes.Node):
+    message_varnames = [
+        varname for _, varname in _iter_nodes_assign_messages_item(root)
+    ]
+
+    # Search for {%- for content in message['content'] -%} loops
+    for loop_ast in root.find_all(jinja2.nodes.For):
+        loop_iter = loop_ast.iter
+        loop_target = loop_ast.target
+
+        for varname in message_varnames:
+            if _is_var_or_elems_access(loop_iter, varname, "content"):
+                assert isinstance(loop_target, jinja2.nodes.Name)
+                yield loop_ast, loop_target.name
+                break
+
+
+def _try_extract_ast(chat_template: str) -> Optional[jinja2.nodes.Template]:
+    try:
+        jinja_compiled = hf_chat_utils._compile_jinja_template(chat_template)
+        return jinja_compiled.environment.parse(chat_template)
+    except Exception:
+        logger.exception("Error when compiling Jinja template")
+        return None
+
+
+def _detect_content_format(
+    chat_template: str,
+    *,
+    default: _ChatTemplateContentFormat,
+) -> _ChatTemplateContentFormat:
+    jinja_ast = _try_extract_ast(chat_template)
+    if jinja_ast is None:
+        return default
+
+    try:
+        next(_iter_nodes_assign_content_item(jinja_ast))
+    except StopIteration:
+        return "string"
+    except Exception:
+        logger.exception("Error when parsing AST of Jinja template")
+        return default
+    else:
+        return "openai"
+
+
+def _resolve_chat_template_content_format(
+    chat_template: Optional[str],
+    given_format: ChatTemplateContentFormatOption,
+    tokenizer: AnyTokenizer,
+) -> _ChatTemplateContentFormat:
+    if isinstance(tokenizer, (PreTrainedTokenizer, PreTrainedTokenizerFast)):
+        tokenizer_chat_template = tokenizer.chat_template
+    else:
+        tokenizer_chat_template = None
+
+    jinja_text: Optional[str]
+    if isinstance(tokenizer_chat_template, str) and chat_template is None:
+        jinja_text = tokenizer_chat_template
+    elif (isinstance(tokenizer_chat_template, dict)
+          and chat_template in tokenizer_chat_template):
+        jinja_text = tokenizer_chat_template[chat_template]
+    else:
+        jinja_text = load_chat_template(chat_template, is_literal=True)
+
+    detected_format = ("string" if jinja_text is None else
+                       _detect_content_format(jinja_text, default="string"))
+
+    return detected_format if given_format == "auto" else given_format
+
+
+@lru_cache
+def resolve_chat_template_content_format(
+    chat_template: Optional[str],
+    given_format: ChatTemplateContentFormatOption,
+    tokenizer: AnyTokenizer,
+) -> _ChatTemplateContentFormat:
+    detected_format = _resolve_chat_template_content_format(
+        chat_template,
+        given_format,
+        tokenizer,
+    )
+
+    logger.info(
+        "Detected the chat template content format to be '%s'. "
+        "You can set `--chat-template-content-format` to override this.",
+        detected_format,
+    )
+
+    if given_format != "auto" and given_format != detected_format:
+        logger.warning(
+            "You specified `--chat-template-content-format %s` "
+            "which is different from the detected format '%s'. "
+            "If our automatic detection is incorrect, please consider "
+            "opening a GitHub issue so that we can improve it: "
+            "https://github.com/vllm-project/vllm/issues/new/choose",
+            given_format,
+            detected_format,
+        )
+
+    return detected_format
+
+
+ModalityStr = Literal["image", "audio", "video"]
+_T = TypeVar("_T")
+
+
+class BaseMultiModalItemTracker(ABC, Generic[_T]):
+    """
+    Tracks multi-modal items in a given request and ensures that the number
+    of multi-modal items in a given request does not exceed the configured
+    maximum per prompt.
+    """
+
+    def __init__(self, model_config: ModelConfig, tokenizer: AnyTokenizer):
+        super().__init__()
+
+        self._model_config = model_config
+        self._tokenizer = tokenizer
+        self._allowed_items = (model_config.multimodal_config.limit_per_prompt
+                               if model_config.multimodal_config else {})
+
+        self._items_by_modality = defaultdict[str, list[_T]](list)
+
+    @property
+    def model_config(self) -> ModelConfig:
+        return self._model_config
+
+    @property
+    def allowed_local_media_path(self):
+        return self._model_config.allowed_local_media_path
+
+    @staticmethod
+    @cache
+    def _cached_token_str(tokenizer: AnyTokenizer, token_index: int) -> str:
+        return tokenizer.decode(token_index)
+
+    def _placeholder_str(self, modality: ModalityStr,
+                         current_count: int) -> Optional[str]:
+        # TODO: Let user specify how to insert image tokens into prompt
+        # (similar to chat template)
+        hf_config = self._model_config.hf_config
+        model_type = hf_config.model_type
+
+        if modality == "image":
+            if model_type == "phi3_v":
+                # Workaround since this token is not defined in the tokenizer
+                return f"<|image_{current_count}|>"
+            if model_type in ("minicpmo", "minicpmv"):
+                return "(<image>./</image>)"
+            if model_type in ("blip-2", "chatglm", "fuyu", "paligemma",
+                              "pixtral"):
+                # These models do not use image tokens in the prompt
+                return None
+            if model_type == "qwen":
+                return f"Picture {current_count}: <img></img>"
+            if model_type.startswith("llava"):
+                return self._cached_token_str(self._tokenizer,
+                                              hf_config.image_token_index)
+            if model_type in ("chameleon", "deepseek_vl_v2", "internvl_chat",
+                              "NVLM_D", "h2ovl_chat"):
+                return "<image>"
+            if model_type == "mllama":
+                return "<|image|>"
+            if model_type in ("qwen2_vl", "qwen2_5_vl"):
+                return "<|vision_start|><|image_pad|><|vision_end|>"
+            if model_type == "molmo":
+                return ""
+            if model_type == "idefics3":
+                return "<image>"
+            if model_type == "aria":
+                return "<|fim_prefix|><|img|><|fim_suffix|>"
+
+            raise TypeError(f"Unknown {modality} model type: {model_type}")
+        elif modality == "audio":
+            if model_type == "ultravox":
+                return "<|audio|>"
+            if model_type == "qwen2_audio":
+                return (f"Audio {current_count}: "
+                        f"<|audio_bos|><|AUDIO|><|audio_eos|>")
+            if model_type == "minicpmo":
+                return "(<audio>./</audio>)"
+            raise TypeError(f"Unknown model type: {model_type}")
+        elif modality == "video":
+            if model_type in ("qwen2_vl", "qwen2_5_vl"):
+                return "<|vision_start|><|video_pad|><|vision_end|>"
+            if model_type in ("minicpmo", "minicpmv"):
+                return "(<video>./</video>)"
+            if model_type.startswith("llava"):
+                return self._cached_token_str(self._tokenizer,
+                                              hf_config.video_token_index)
+            raise TypeError(f"Unknown {modality} model type: {model_type}")
+        else:
+            raise TypeError(f"Unknown modality: {modality}")
+
+ def add(self, modality: ModalityStr, item: _T) -> Optional[str]:
445
+ """
446
+ Add a multi-modal item to the current prompt and returns the
447
+ placeholder string to use, if any.
448
+ """
449
+ allowed_count = self._allowed_items.get(modality, 1)
450
+ current_count = len(self._items_by_modality[modality]) + 1
451
+ if current_count > allowed_count:
452
+ raise ValueError(
453
+ f"At most {allowed_count} {modality}(s) may be provided in "
454
+ "one request.")
455
+
456
+ self._items_by_modality[modality].append(item)
457
+
458
+ return self._placeholder_str(modality, current_count)
459
+
460
+ @abstractmethod
461
+ def create_parser(self) -> "BaseMultiModalContentParser":
462
+ raise NotImplementedError
463
+
464
+
465
+ class MultiModalItemTracker(BaseMultiModalItemTracker[object]):
466
+
467
+ def all_mm_data(self) -> Optional[MultiModalDataDict]:
468
+ if self._items_by_modality:
469
+ return dict(self._items_by_modality)
470
+
471
+ return None
472
+
473
+ def create_parser(self) -> "BaseMultiModalContentParser":
474
+ return MultiModalContentParser(self)
475
+
476
+
477
+ class AsyncMultiModalItemTracker(BaseMultiModalItemTracker[Awaitable[object]]):
478
+
479
+ async def all_mm_data(self) -> Optional[MultiModalDataDict]:
480
+ if self._items_by_modality:
481
+ return {
482
+ modality: await asyncio.gather(*items)
483
+ for modality, items in self._items_by_modality.items()
484
+ }
485
+
486
+ return None
487
+
488
+ def create_parser(self) -> "BaseMultiModalContentParser":
489
+ return AsyncMultiModalContentParser(self)
490
+
491
+
492
+ class BaseMultiModalContentParser(ABC):
493
+
494
+ def __init__(self) -> None:
495
+ super().__init__()
496
+
497
+ # multimodal placeholder_string : count
498
+ self._placeholder_counts: Dict[str, int] = defaultdict(lambda: 0)
499
+
500
+ def _add_placeholder(self, placeholder: Optional[str]):
501
+ if placeholder:
502
+ self._placeholder_counts[placeholder] += 1
503
+
504
+ def mm_placeholder_counts(self) -> Dict[str, int]:
505
+ return dict(self._placeholder_counts)
506
+
507
+ @abstractmethod
508
+ def parse_image(self, image_url: str) -> None:
509
+ raise NotImplementedError
510
+
511
+ @abstractmethod
512
+ def parse_audio(self, audio_url: str) -> None:
513
+ raise NotImplementedError
514
+
515
+ @abstractmethod
516
+ def parse_input_audio(self, input_audio: InputAudio) -> None:
517
+ raise NotImplementedError
518
+
519
+ @abstractmethod
520
+ def parse_video(self, video_url: str) -> None:
521
+ raise NotImplementedError
522
+
523
+
524
+ class MultiModalContentParser(BaseMultiModalContentParser):
525
+
526
+ def __init__(self, tracker: MultiModalItemTracker) -> None:
527
+ super().__init__()
528
+
529
+ self._tracker = tracker
530
+
531
+ self._connector = MediaConnector(
532
+ allowed_local_media_path=tracker.allowed_local_media_path,
533
+ )
534
+
535
+ def parse_image(self, image_url: str) -> None:
536
+ image = self._connector.fetch_image(image_url)
537
+
538
+ placeholder = self._tracker.add("image", image)
539
+ self._add_placeholder(placeholder)
540
+
541
+ def parse_audio(self, audio_url: str) -> None:
542
+ audio = self._connector.fetch_audio(audio_url)
543
+
544
+ placeholder = self._tracker.add("audio", audio)
545
+ self._add_placeholder(placeholder)
546
+
547
+ def parse_input_audio(self, input_audio: InputAudio) -> None:
548
+ audio_data = input_audio.get("data", "")
549
+ audio_format = input_audio.get("format", "")
550
+ audio_url = f"data:audio/{audio_format};base64,{audio_data}"
551
+
552
+ return self.parse_audio(audio_url)
553
+
554
+ def parse_video(self, video_url: str) -> None:
555
+ video = self._connector.fetch_video(video_url)
556
+
557
+ placeholder = self._tracker.add("video", video)
558
+ self._add_placeholder(placeholder)
559
+
560
+
561
+ class AsyncMultiModalContentParser(BaseMultiModalContentParser):
562
+
563
+ def __init__(self, tracker: AsyncMultiModalItemTracker) -> None:
564
+ super().__init__()
565
+
566
+ self._tracker = tracker
567
+ self._connector = MediaConnector(
568
+ allowed_local_media_path=tracker.allowed_local_media_path,
569
+ )
570
+
571
+ def parse_image(self, image_url: str) -> None:
572
+ image_coro = self._connector.fetch_image_async(image_url)
573
+
574
+ placeholder = self._tracker.add("image", image_coro)
575
+ self._add_placeholder(placeholder)
576
+
577
+ def parse_audio(self, audio_url: str) -> None:
578
+ audio_coro = self._connector.fetch_audio_async(audio_url)
579
+
580
+ placeholder = self._tracker.add("audio", audio_coro)
581
+ self._add_placeholder(placeholder)
582
+
583
+ def parse_input_audio(self, input_audio: InputAudio) -> None:
584
+ audio_data = input_audio.get("data", "")
585
+ audio_format = input_audio.get("format", "")
586
+ audio_url = f"data:audio/{audio_format};base64,{audio_data}"
587
+
588
+ return self.parse_audio(audio_url)
589
+
590
+ def parse_video(self, video_url: str) -> None:
591
+ video = self._connector.fetch_video_async(video_url)
592
+
593
+ placeholder = self._tracker.add("video", video)
594
+ self._add_placeholder(placeholder)
595
+
596
+
597
+ def validate_chat_template(chat_template: Optional[Union[Path, str]]):
598
+ """Raises if the provided chat template appears invalid."""
599
+ if chat_template is None:
600
+ return
601
+
602
+ elif isinstance(chat_template, Path) and not chat_template.exists():
603
+ raise FileNotFoundError(
604
+ "the supplied chat template path doesn't exist")
605
+
606
+ elif isinstance(chat_template, str):
607
+ JINJA_CHARS = "{}\n"
608
+ if not any(c in chat_template
609
+ for c in JINJA_CHARS) and not Path(chat_template).exists():
610
+ raise ValueError(
611
+ f"The supplied chat template string ({chat_template}) "
612
+ f"appears path-like, but doesn't exist!")
613
+
614
+ else:
615
+ raise TypeError(
616
+ f"{type(chat_template)} is not a valid chat template type")
617
+
618
+
619
+ def load_chat_template(
620
+ chat_template: Optional[Union[Path, str]],
621
+ *,
622
+ is_literal: bool = False,
623
+ ) -> Optional[str]:
624
+ if chat_template is None:
625
+ return None
626
+
627
+ if is_literal:
628
+ if isinstance(chat_template, Path):
629
+ raise TypeError("chat_template is expected to be read directly "
630
+ "from its value")
631
+
632
+ return codecs.decode(chat_template, "unicode_escape")
633
+
634
+ try:
635
+ with open(chat_template) as f:
636
+ return f.read()
637
+ except OSError as e:
638
+ if isinstance(chat_template, Path):
639
+ raise
640
+
641
+ JINJA_CHARS = "{}\n"
642
+ if not any(c in chat_template for c in JINJA_CHARS):
643
+ msg = (f"The supplied chat template ({chat_template}) "
644
+ f"looks like a file path, but it failed to be "
645
+ f"opened. Reason: {e}")
646
+ raise ValueError(msg) from e
647
+
648
+ # If opening a file fails, set chat template to be args to
649
+ # ensure we decode so our escape are interpreted correctly
650
+ return load_chat_template(chat_template, is_literal=True)
651
+
652
+
653
+ # TODO: Let user specify how to insert multimodal tokens into prompt
654
+ # (similar to chat template)
655
+ def _get_full_multimodal_text_prompt(placeholder_counts: Dict[str, int],
656
+ text_prompt: str) -> str:
657
+ """Combine multimodal prompts for a multimodal language model."""
658
+
659
+ # Look through the text prompt to check for missing placeholders
660
+ missing_placeholders: List[str] = []
661
+ for placeholder in placeholder_counts:
662
+
663
+ # For any existing placeholder in the text prompt, we leave it as is
664
+ placeholder_counts[placeholder] -= text_prompt.count(placeholder)
665
+
666
+ if placeholder_counts[placeholder] < 0:
667
+ raise ValueError(
668
+ f"Found more '{placeholder}' placeholders in input prompt than "
669
+ "actual multimodal data items.")
670
+
671
+ missing_placeholders.extend([placeholder] *
672
+ placeholder_counts[placeholder])
673
+
674
+ # NOTE: For now we always add missing placeholders at the front of
675
+ # the prompt. This may change to be customizable in the future.
676
+ return "\n".join(missing_placeholders + [text_prompt])
677
+
678
+
679
+ # No need to validate using Pydantic again
680
+ _TextParser = partial(cast, ChatCompletionContentPartTextParam)
681
+ _ImageParser = partial(cast, ChatCompletionContentPartImageParam)
682
+ _AudioParser = partial(cast, ChatCompletionContentPartAudioParam)
683
+ _InputAudioParser = partial(cast, ChatCompletionContentPartInputAudioParam)
684
+ _RefusalParser = partial(cast, ChatCompletionContentPartRefusalParam)
685
+ _VideoParser = partial(cast, ChatCompletionContentPartVideoParam)
686
+
687
+ _ContentPart: TypeAlias = Union[str, Dict[str, str], InputAudio]
688
+
689
+ # Define a mapping from part types to their corresponding parsing functions.
690
+ MM_PARSER_MAP: Dict[
691
+ str,
692
+ Callable[[ChatCompletionContentPartParam], _ContentPart],
693
+ ] = {
694
+ "text":
695
+ lambda part: _TextParser(part).get("text", ""),
696
+ "image_url":
697
+ lambda part: _ImageParser(part).get("image_url", {}).get("url", ""),
698
+ "audio_url":
699
+ lambda part: _AudioParser(part).get("audio_url", {}).get("url", ""),
700
+ "input_audio":
701
+ lambda part: _InputAudioParser(part).get("input_audio", {}),
702
+ "refusal":
703
+ lambda part: _RefusalParser(part).get("refusal", ""),
704
+ "video_url":
705
+ lambda part: _VideoParser(part).get("video_url", {}).get("url", ""),
706
+ }
707
+
708
+
709
+ def _parse_chat_message_content_mm_part(
710
+ part: ChatCompletionContentPartParam) -> tuple[str, _ContentPart]:
711
+ """
712
+ Parses a given multi-modal content part based on its type.
713
+
714
+ Args:
715
+ part: A dict containing the content part, with a potential 'type' field.
716
+
717
+ Returns:
718
+ A tuple (part_type, content) where:
719
+ - part_type: Type of the part (e.g., 'text', 'image_url').
720
+ - content: Parsed content (e.g., text, image URL).
721
+
722
+ Raises:
723
+ ValueError: If the 'type' field is missing and no direct URL is found.
724
+ """
725
+ assert isinstance(
726
+ part, dict) # This is needed to avoid mypy errors: part.get() from str
727
+ part_type = part.get("type", None)
728
+
729
+ if isinstance(part_type, str) and part_type in MM_PARSER_MAP:
730
+ content = MM_PARSER_MAP[part_type](part)
731
+
732
+ # Special case for 'image_url.detail'
733
+ # We only support 'auto', which is the default
734
+ if part_type == "image_url" and part.get("detail", "auto") != "auto":
735
+ logger.warning("'image_url.detail' is currently not supported "
736
+ "and will be ignored.")
737
+
738
+ return part_type, content
739
+
740
+ # Handle missing 'type' but provided direct URL fields.
741
+ # 'type' is required field by pydantic
742
+ if part_type is None:
743
+ if part.get("image_url") is not None:
744
+ image_params = cast(CustomChatCompletionContentSimpleImageParam,
745
+ part)
746
+ return "image_url", image_params.get("image_url", "")
747
+ if part.get("audio_url") is not None:
748
+ audio_params = cast(CustomChatCompletionContentSimpleAudioParam,
749
+ part)
750
+ return "audio_url", audio_params.get("audio_url", "")
751
+ if part.get("input_audio") is not None:
752
+ input_audio_params = cast(Dict[str, str], part)
753
+ return "input_audio", input_audio_params
754
+ if part.get("video_url") is not None:
755
+ video_params = cast(CustomChatCompletionContentSimpleVideoParam,
756
+ part)
757
+ return "video_url", video_params.get("video_url", "")
758
+ # Raise an error if no 'type' or direct URL is found.
759
+ raise ValueError("Missing 'type' field in multimodal part.")
760
+
761
+ if not isinstance(part_type, str):
762
+ raise ValueError("Invalid 'type' field in multimodal part.")
763
+ return part_type, "unknown part_type content"
764
+
765
+
766
+ VALID_MESSAGE_CONTENT_MM_PART_TYPES = ("text", "refusal", "image_url",
767
+ "audio_url", "input_audio", "video_url")
768
+
769
+
770
+ def _parse_chat_message_content_parts(
771
+ role: str,
772
+ parts: Iterable[ChatCompletionContentPartParam],
773
+ mm_tracker: BaseMultiModalItemTracker,
774
+ *,
775
+ wrap_dicts: bool,
776
+ ) -> List[ConversationMessage]:
777
+ content = list[_ContentPart]()
778
+
779
+ mm_parser = mm_tracker.create_parser()
780
+
781
+ for part in parts:
782
+ parse_res = _parse_chat_message_content_part(
783
+ part,
784
+ mm_parser,
785
+ wrap_dicts=wrap_dicts,
786
+ )
787
+ if parse_res:
788
+ content.append(parse_res)
789
+
790
+ if wrap_dicts:
791
+ # Parsing wraps images and texts as interleaved dictionaries
792
+ return [ConversationMessage(role=role,
793
+ content=content)] # type: ignore
794
+ texts = cast(List[str], content)
795
+ text_prompt = "\n".join(texts)
796
+ mm_placeholder_counts = mm_parser.mm_placeholder_counts()
797
+ if mm_placeholder_counts:
798
+ text_prompt = _get_full_multimodal_text_prompt(mm_placeholder_counts,
799
+ text_prompt)
800
+ return [ConversationMessage(role=role, content=text_prompt)]
801
+
802
+
803
+ def _parse_chat_message_content_part(
804
+ part: ChatCompletionContentPartParam,
805
+ mm_parser: BaseMultiModalContentParser,
806
+ *,
807
+ wrap_dicts: bool,
808
+ ) -> Optional[_ContentPart]:
809
+ """Parses a single part of a conversation. If wrap_dicts is True,
810
+ structured dictionary pieces for texts and images will be
811
+ wrapped in dictionaries, i.e., {"type": "text", "text", ...} and
812
+ {"type": "image"}, respectively. Otherwise multimodal data will be
813
+ handled by mm_parser, and texts will be returned as strings to be joined
814
+ with multimodal placeholders.
815
+ """
816
+ if isinstance(part, str): # Handle plain text parts
817
+ return part
818
+
819
+ # Handle structured dictionary parts
820
+ part_type, content = _parse_chat_message_content_mm_part(part)
821
+
822
+ # if part_type is text/refusal/image_url/audio_url/video_url/input_audio but
823
+ # content is empty, log a warning and skip
824
+ if part_type in VALID_MESSAGE_CONTENT_MM_PART_TYPES and not content:
825
+ logger.warning(
826
+ "Skipping multimodal part (type: '%s')"
827
+ "with empty / unparsable content.", part_type)
828
+ return None
829
+
830
+ if part_type in ("text", "refusal"):
831
+ str_content = cast(str, content)
832
+ if wrap_dicts:
833
+ return {'type': 'text', 'text': str_content}
834
+ else:
835
+ return str_content
836
+
837
+ if part_type == "image_url":
838
+ str_content = cast(str, content)
839
+ mm_parser.parse_image(str_content)
840
+ return {'type': 'image'} if wrap_dicts else None
841
+
842
+ if part_type == "audio_url":
843
+ str_content = cast(str, content)
844
+ mm_parser.parse_audio(str_content)
845
+ return {'type': 'audio'} if wrap_dicts else None
846
+
847
+ if part_type == "input_audio":
848
+ dict_content = cast(InputAudio, content)
849
+ mm_parser.parse_input_audio(dict_content)
850
+ return {'type': 'audio'} if wrap_dicts else None
851
+
852
+ if part_type == "video_url":
853
+ str_content = cast(str, content)
854
+ mm_parser.parse_video(str_content)
855
+ return {'type': 'video'} if wrap_dicts else None
856
+
857
+ raise NotImplementedError(f"Unknown part type: {part_type}")
858
+
859
+
860
+ # No need to validate using Pydantic again
861
+ _AssistantParser = partial(cast, ChatCompletionAssistantMessageParam)
862
+ _ToolParser = partial(cast, ChatCompletionToolMessageParam)
863
+
864
+
865
+ def _parse_chat_message_content(
866
+ message: ChatCompletionMessageParam,
867
+ mm_tracker: BaseMultiModalItemTracker,
868
+ content_format: _ChatTemplateContentFormat,
869
+ ) -> List[ConversationMessage]:
870
+ role = message["role"]
871
+ content = message.get("content")
872
+
873
+ if content is None:
874
+ content = []
875
+ elif isinstance(content, str):
876
+ content = [
877
+ ChatCompletionContentPartTextParam(type="text", text=content)
878
+ ]
879
+ result = _parse_chat_message_content_parts(
880
+ role,
881
+ content, # type: ignore
882
+ mm_tracker,
883
+ wrap_dicts=(content_format == "openai"),
884
+ )
885
+
886
+ for result_msg in result:
887
+ if role == 'assistant':
888
+ parsed_msg = _AssistantParser(message)
889
+
890
+ if "tool_calls" in parsed_msg:
891
+ result_msg["tool_calls"] = list(parsed_msg["tool_calls"])
892
+ elif role == "tool":
893
+ parsed_msg = _ToolParser(message)
894
+ if "tool_call_id" in parsed_msg:
895
+ result_msg["tool_call_id"] = parsed_msg["tool_call_id"]
896
+
897
+ if "name" in message and isinstance(message["name"], str):
898
+ result_msg["name"] = message["name"]
899
+
900
+ return result
901
+
902
+
903
+ def _postprocess_messages(messages: List[ConversationMessage]) -> None:
904
+ # per the Transformers docs & maintainers, tool call arguments in
905
+ # assistant-role messages with tool_calls need to be dicts not JSON str -
906
+ # this is how tool-use chat templates will expect them moving forwards
907
+ # so, for messages that have tool_calls, parse the string (which we get
908
+ # from openAI format) to dict
909
+ for message in messages:
910
+ if (message["role"] == "assistant" and "tool_calls" in message
911
+ and isinstance(message["tool_calls"], list)):
912
+
913
+ for item in message["tool_calls"]:
914
+ item["function"]["arguments"] = json.loads(
915
+ item["function"]["arguments"])
916
+
917
+
918
+ def parse_chat_messages(
919
+ messages: List[ChatCompletionMessageParam],
920
+ model_config: ModelConfig,
921
+ tokenizer: AnyTokenizer,
922
+ content_format: _ChatTemplateContentFormat,
923
+ ) -> Tuple[List[ConversationMessage], Optional[MultiModalDataDict]]:
924
+ conversation: List[ConversationMessage] = []
925
+ mm_tracker = MultiModalItemTracker(model_config, tokenizer)
926
+
927
+ for msg in messages:
928
+ sub_messages = _parse_chat_message_content(
929
+ msg,
930
+ mm_tracker,
931
+ content_format,
932
+ )
933
+
934
+ conversation.extend(sub_messages)
935
+
936
+ _postprocess_messages(conversation)
937
+
938
+ return conversation, mm_tracker.all_mm_data()
939
+
940
+
941
+ def parse_chat_messages_futures(
942
+ messages: List[ChatCompletionMessageParam],
943
+ model_config: ModelConfig,
944
+ tokenizer: AnyTokenizer,
945
+ content_format: _ChatTemplateContentFormat,
946
+ ) -> Tuple[List[ConversationMessage], Awaitable[Optional[MultiModalDataDict]]]:
947
+ conversation: List[ConversationMessage] = []
948
+ mm_tracker = AsyncMultiModalItemTracker(model_config, tokenizer)
949
+
950
+ for msg in messages:
951
+ sub_messages = _parse_chat_message_content(
952
+ msg,
953
+ mm_tracker,
954
+ content_format,
955
+ )
956
+
957
+ conversation.extend(sub_messages)
958
+
959
+ _postprocess_messages(conversation)
960
+
961
+ return conversation, mm_tracker.all_mm_data()
962
+
963
+
964
+ def apply_hf_chat_template(
965
+ tokenizer: Union[PreTrainedTokenizer, PreTrainedTokenizerFast],
966
+ conversation: List[ConversationMessage],
967
+ chat_template: Optional[str],
968
+ *,
969
+ tokenize: bool = False, # Different from HF's default
970
+ **kwargs: Any,
971
+ ) -> str:
972
+ if chat_template is None and tokenizer.chat_template is None:
973
+ raise ValueError(
974
+ "As of transformers v4.44, default chat template is no longer "
975
+ "allowed, so you must provide a chat template if the tokenizer "
976
+ "does not define one.")
977
+
978
+ return tokenizer.apply_chat_template(
979
+ conversation=conversation, # type: ignore[arg-type]
980
+ chat_template=chat_template,
981
+ tokenize=tokenize,
982
+ **kwargs,
983
+ )
984
+
985
+
986
+ def apply_mistral_chat_template(
987
+ tokenizer: MistralTokenizer,
988
+ messages: List[ChatCompletionMessageParam],
989
+ chat_template: Optional[str] = None,
990
+ **kwargs: Any,
991
+ ) -> List[int]:
992
+ if chat_template is not None:
993
+ logger.warning_once(
994
+ "'chat_template' cannot be overridden for mistral tokenizer.")
995
+ if "add_generation_prompt" in kwargs:
996
+ logger.warning_once(
997
+ "'add_generation_prompt' is not supported for mistral tokenizer, "
998
+ "so it will be ignored.")
999
+ if "continue_final_message" in kwargs:
1000
+ logger.warning_once(
1001
+ "'continue_final_message' is not supported for mistral tokenizer, "
1002
+ "so it will be ignored.")
1003
+
1004
+ return tokenizer.apply_chat_template(
1005
+ messages=messages,
1006
+ **kwargs,
1007
+ )
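For reference, a minimal usage sketch of the chat-template helpers added above in chat_utils.py (illustrative only, not part of the committed file; the model name and message text are hypothetical placeholders):

    from transformers import AutoTokenizer

    from vllm.entrypoints.chat_utils import (apply_hf_chat_template,
                                             load_chat_template,
                                             resolve_chat_template_content_format)

    # Hypothetical HF model; any tokenizer that defines a chat template works.
    tokenizer = AutoTokenizer.from_pretrained("example-org/example-chat-model")

    # No explicit template supplied, so the tokenizer's own template is used.
    chat_template = load_chat_template(None)

    # "auto" lets the Jinja AST inspection above decide between the
    # "string" and "openai" content formats.
    content_format = resolve_chat_template_content_format(
        chat_template, "auto", tokenizer)

    conversation = [{"role": "user", "content": "Who are you?"}]
    prompt = apply_hf_chat_template(
        tokenizer,
        conversation=conversation,
        chat_template=chat_template,
        add_generation_prompt=True,
    )
    print(content_format, prompt)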
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/launcher.py ADDED
@@ -0,0 +1,108 @@
+ # SPDX-License-Identifier: Apache-2.0
+
+ import asyncio
+ import signal
+ import socket
+ from http import HTTPStatus
+ from typing import Any, Optional
+
+ import uvicorn
+ from fastapi import FastAPI, Request, Response
+
+ from vllm import envs
+ from vllm.engine.async_llm_engine import AsyncEngineDeadError
+ from vllm.engine.multiprocessing import MQEngineDeadError
+ from vllm.logger import init_logger
+ from vllm.utils import find_process_using_port
+
+ logger = init_logger(__name__)
+
+
+ async def serve_http(app: FastAPI, sock: Optional[socket.socket],
+                      **uvicorn_kwargs: Any):
+     logger.info("Available routes are:")
+     for route in app.routes:
+         methods = getattr(route, "methods", None)
+         path = getattr(route, "path", None)
+
+         if methods is None or path is None:
+             continue
+
+         logger.info("Route: %s, Methods: %s", path, ', '.join(methods))
+
+     config = uvicorn.Config(app, **uvicorn_kwargs)
+     server = uvicorn.Server(config)
+     _add_shutdown_handlers(app, server)
+
+     loop = asyncio.get_running_loop()
+
+     server_task = loop.create_task(
+         server.serve(sockets=[sock] if sock else None))
+
+     def signal_handler() -> None:
+         # prevents the uvicorn signal handler to exit early
+         server_task.cancel()
+
+     async def dummy_shutdown() -> None:
+         pass
+
+     loop.add_signal_handler(signal.SIGINT, signal_handler)
+     loop.add_signal_handler(signal.SIGTERM, signal_handler)
+
+     try:
+         await server_task
+         return dummy_shutdown()
+     except asyncio.CancelledError:
+         port = uvicorn_kwargs["port"]
+         process = find_process_using_port(port)
+         if process is not None:
+             logger.debug(
+                 "port %s is used by process %s launched with command:\n%s",
+                 port, process, " ".join(process.cmdline()))
+         logger.info("Shutting down FastAPI HTTP server.")
+         return server.shutdown()
+
+
+ def _add_shutdown_handlers(app: FastAPI, server: uvicorn.Server) -> None:
+     """Adds handlers for fatal errors that should crash the server"""
+
+     @app.exception_handler(RuntimeError)
+     async def runtime_error_handler(request: Request, __):
+         """On generic runtime error, check to see if the engine has died.
+         It probably has, in which case the server will no longer be able to
+         handle requests. Trigger a graceful shutdown with a SIGTERM."""
+         engine = request.app.state.engine_client
+         if (not envs.VLLM_KEEP_ALIVE_ON_ENGINE_DEATH and engine.errored
+                 and not engine.is_running):
+             logger.fatal("AsyncLLMEngine has failed, terminating server "
+                          "process")
+             # See discussions here on shutting down a uvicorn server
+             # https://github.com/encode/uvicorn/discussions/1103
+             # In this case we cannot await the server shutdown here because
+             # this handler must first return to close the connection for
+             # this request.
+             server.should_exit = True
+
+         return Response(status_code=HTTPStatus.INTERNAL_SERVER_ERROR)
+
+     @app.exception_handler(AsyncEngineDeadError)
+     async def async_engine_dead_handler(_, __):
+         """Kill the server if the async engine is already dead. It will
+         not handle any further requests."""
+         if not envs.VLLM_KEEP_ALIVE_ON_ENGINE_DEATH:
+             logger.fatal("AsyncLLMEngine is already dead, terminating server "
+                          "process")
+             server.should_exit = True
+
+         return Response(status_code=HTTPStatus.INTERNAL_SERVER_ERROR)
+
+     @app.exception_handler(MQEngineDeadError)
+     async def mq_engine_dead_handler(_, __):
+         """Kill the server if the mq engine is already dead. It will
+         not handle any further requests."""
+         if not envs.VLLM_KEEP_ALIVE_ON_ENGINE_DEATH:
+             logger.fatal("MQLLMEngine is already dead, terminating server "
+                          "process")
+             server.should_exit = True
+
+         return Response(status_code=HTTPStatus.INTERNAL_SERVER_ERROR)
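A minimal sketch of how the serve_http helper above might be driven (illustrative only, not part of the committed file; the host/port values are placeholders, and the signal handlers assume a Unix event loop):

    import asyncio

    from fastapi import FastAPI

    from vllm.entrypoints.launcher import serve_http

    app = FastAPI()

    @app.get("/health")
    async def health():
        return {"status": "ok"}

    async def main():
        # serve_http runs until the server exits or is cancelled by
        # SIGINT/SIGTERM, then returns a shutdown coroutine to await.
        shutdown_coro = await serve_http(app, sock=None,
                                         host="127.0.0.1", port=8000)
        await shutdown_coro

    if __name__ == "__main__":
        asyncio.run(main())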
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/llm.py ADDED
@@ -0,0 +1,1421 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ import itertools
4
+ import warnings
5
+ from contextlib import contextmanager
6
+ from typing import (Any, Callable, ClassVar, Dict, List, Optional, Sequence,
7
+ Tuple, Type, Union, cast, overload)
8
+
9
+ import cloudpickle
10
+ import torch
11
+ import torch.nn as nn
12
+ from tqdm import tqdm
13
+ from typing_extensions import TypeVar, deprecated
14
+
15
+ from vllm import envs
16
+ from vllm.beam_search import (BeamSearchInstance, BeamSearchOutput,
17
+ BeamSearchSequence, get_beam_search_score)
18
+ from vllm.config import CompilationConfig
19
+ from vllm.engine.arg_utils import (EngineArgs, HfOverrides, PoolerConfig,
20
+ TaskOption)
21
+ from vllm.engine.llm_engine import LLMEngine
22
+ from vllm.entrypoints.chat_utils import (ChatCompletionMessageParam,
23
+ ChatTemplateContentFormatOption,
24
+ apply_hf_chat_template,
25
+ apply_mistral_chat_template,
26
+ parse_chat_messages,
27
+ resolve_chat_template_content_format)
28
+ from vllm.inputs import PromptType, SingletonPrompt, TextPrompt, TokensPrompt
29
+ from vllm.inputs.parse import is_token_prompt, parse_and_batch_prompt
30
+ from vllm.logger import init_logger
31
+ from vllm.lora.request import LoRARequest
32
+ from vllm.model_executor.guided_decoding.guided_fields import (
33
+ GuidedDecodingRequest, LLMGuidedOptions)
34
+ from vllm.outputs import (ClassificationRequestOutput, EmbeddingRequestOutput,
35
+ PoolingRequestOutput, RequestOutput,
36
+ ScoringRequestOutput)
37
+ from vllm.pooling_params import PoolingParams
38
+ from vllm.prompt_adapter.request import PromptAdapterRequest
39
+ from vllm.sampling_params import (BeamSearchParams, GuidedDecodingParams,
40
+ RequestOutputKind, SamplingParams)
41
+ from vllm.transformers_utils.tokenizer import (AnyTokenizer, MistralTokenizer,
42
+ get_cached_tokenizer)
43
+ from vllm.transformers_utils.tokenizer_group import TokenizerGroup
44
+ from vllm.usage.usage_lib import UsageContext
45
+ from vllm.utils import Counter, deprecate_args, deprecate_kwargs, is_list_of
46
+
47
+ logger = init_logger(__name__)
48
+
49
+ _R = TypeVar("_R", default=Any)
50
+
51
+
52
+ class LLM:
53
+ """An LLM for generating texts from given prompts and sampling parameters.
54
+
55
+ This class includes a tokenizer, a language model (possibly distributed
56
+ across multiple GPUs), and GPU memory space allocated for intermediate
57
+ states (aka KV cache). Given a batch of prompts and sampling parameters,
58
+ this class generates texts from the model, using an intelligent batching
59
+ mechanism and efficient memory management.
60
+
61
+ Args:
62
+ model: The name or path of a HuggingFace Transformers model.
63
+ tokenizer: The name or path of a HuggingFace Transformers tokenizer.
64
+ tokenizer_mode: The tokenizer mode. "auto" will use the fast tokenizer
65
+ if available, and "slow" will always use the slow tokenizer.
66
+ skip_tokenizer_init: If true, skip initialization of tokenizer and
67
+ detokenizer. Expect valid prompt_token_ids and None for prompt
68
+ from the input.
69
+ trust_remote_code: Trust remote code (e.g., from HuggingFace) when
70
+ downloading the model and tokenizer.
71
+ allowed_local_media_path: Allowing API requests to read local images
72
+ or videos from directories specified by the server file system.
73
+ This is a security risk. Should only be enabled in trusted
74
+ environments.
75
+ tensor_parallel_size: The number of GPUs to use for distributed
76
+ execution with tensor parallelism.
77
+ dtype: The data type for the model weights and activations. Currently,
78
+ we support `float32`, `float16`, and `bfloat16`. If `auto`, we use
79
+ the `torch_dtype` attribute specified in the model config file.
80
+ However, if the `torch_dtype` in the config is `float32`, we will
81
+ use `float16` instead.
82
+ quantization: The method used to quantize the model weights. Currently,
83
+ we support "awq", "gptq", and "fp8" (experimental).
84
+ If None, we first check the `quantization_config` attribute in the
85
+ model config file. If that is None, we assume the model weights are
86
+ not quantized and use `dtype` to determine the data type of
87
+ the weights.
88
+ revision: The specific model version to use. It can be a branch name,
89
+ a tag name, or a commit id.
90
+ tokenizer_revision: The specific tokenizer version to use. It can be a
91
+ branch name, a tag name, or a commit id.
92
+ seed: The seed to initialize the random number generator for sampling.
93
+ gpu_memory_utilization: The ratio (between 0 and 1) of GPU memory to
94
+ reserve for the model weights, activations, and KV cache. Higher
95
+ values will increase the KV cache size and thus improve the model's
96
+ throughput. However, if the value is too high, it may cause out-of-
97
+ memory (OOM) errors.
98
+ swap_space: The size (GiB) of CPU memory per GPU to use as swap space.
99
+ This can be used for temporarily storing the states of the requests
100
+ when their `best_of` sampling parameters are larger than 1. If all
101
+ requests will have `best_of=1`, you can safely set this to 0.
102
+ Otherwise, too small values may cause out-of-memory (OOM) errors.
103
+ cpu_offload_gb: The size (GiB) of CPU memory to use for offloading
104
+ the model weights. This virtually increases the GPU memory space
105
+ you can use to hold the model weights, at the cost of CPU-GPU data
106
+ transfer for every forward pass.
107
+ enforce_eager: Whether to enforce eager execution. If True, we will
108
+ disable CUDA graph and always execute the model in eager mode.
109
+ If False, we will use CUDA graph and eager execution in hybrid.
110
+ max_seq_len_to_capture: Maximum sequence len covered by CUDA graphs.
111
+ When a sequence has context length larger than this, we fall back
112
+ to eager mode. Additionally for encoder-decoder models, if the
113
+ sequence length of the encoder input is larger than this, we fall
114
+ back to the eager mode.
115
+ disable_custom_all_reduce: See :class:`~vllm.config.ParallelConfig`
116
+ disable_async_output_proc: Disable async output processing.
117
+ This may result in lower performance.
118
+ hf_overrides: If a dictionary, contains arguments to be forwarded to the
119
+ HuggingFace config. If a callable, it is called to update the
120
+ HuggingFace config.
121
+ compilation_config: Either an integer or a dictionary. If it is an
122
+ integer, it is used as the level of compilation optimization. If it
123
+ is a dictionary, it can specify the full compilation configuration.
124
+ **kwargs: Arguments for :class:`~vllm.EngineArgs`. (See
125
+ :ref:`engine-args`)
126
+
127
+ Note:
128
+ This class is intended to be used for offline inference. For online
129
+ serving, use the :class:`~vllm.AsyncLLMEngine` class instead.
130
+ """
131
+
132
+ DEPRECATE_LEGACY: ClassVar[bool] = True
133
+ """A flag to toggle whether to deprecate the legacy generate/encode API."""
134
+
135
+ DEPRECATE_INIT_POSARGS: ClassVar[bool] = True
136
+ """
137
+ A flag to toggle whether to deprecate positional arguments in
138
+ :meth:`LLM.__init__`.
139
+ """
140
+
141
+ @classmethod
142
+ @contextmanager
143
+ def deprecate_legacy_api(cls):
144
+ cls.DEPRECATE_LEGACY = True
145
+
146
+ yield
147
+
148
+ cls.DEPRECATE_LEGACY = False
149
+
150
+ @deprecate_args(
151
+ start_index=2, # Ignore self and model
152
+ is_deprecated=lambda: LLM.DEPRECATE_INIT_POSARGS,
153
+ additional_message=(
154
+ "All positional arguments other than `model` will be "
155
+ "replaced with keyword arguments in an upcoming version."),
156
+ )
157
+ def __init__(
158
+ self,
159
+ model: str,
160
+ tokenizer: Optional[str] = None,
161
+ tokenizer_mode: str = "auto",
162
+ skip_tokenizer_init: bool = False,
163
+ trust_remote_code: bool = False,
164
+ allowed_local_media_path: str = "",
165
+ tensor_parallel_size: int = 1,
166
+ dtype: str = "auto",
167
+ quantization: Optional[str] = None,
168
+ revision: Optional[str] = None,
169
+ tokenizer_revision: Optional[str] = None,
170
+ seed: int = 0,
171
+ gpu_memory_utilization: float = 0.9,
172
+ swap_space: float = 4,
173
+ cpu_offload_gb: float = 0,
174
+ enforce_eager: Optional[bool] = None,
175
+ max_seq_len_to_capture: int = 8192,
176
+ disable_custom_all_reduce: bool = False,
177
+ disable_async_output_proc: bool = False,
178
+ hf_overrides: Optional[HfOverrides] = None,
179
+ mm_processor_kwargs: Optional[Dict[str, Any]] = None,
180
+ # After positional args are removed, move this right below `model`
181
+ task: TaskOption = "auto",
182
+ override_pooler_config: Optional[PoolerConfig] = None,
183
+ compilation_config: Optional[Union[int, Dict[str, Any]]] = None,
184
+ **kwargs,
185
+ ) -> None:
186
+ '''
187
+ LLM constructor.
188
+
189
+ Note: if enforce_eager is unset (enforce_eager is None)
190
+ it defaults to False.
191
+ '''
192
+
193
+ if "disable_log_stats" not in kwargs:
194
+ kwargs["disable_log_stats"] = True
195
+
196
+ if "worker_cls" in kwargs:
197
+ worker_cls = kwargs["worker_cls"]
198
+ # if the worker_cls is not qualified string name,
199
+ # we serialize it using cloudpickle to avoid pickling issues
200
+ if isinstance(worker_cls, type):
201
+ kwargs["worker_cls"] = cloudpickle.dumps(worker_cls)
202
+
203
+ if compilation_config is not None:
204
+ if isinstance(compilation_config, (int, dict)):
205
+ compilation_config_instance = CompilationConfig.from_cli(
206
+ str(compilation_config))
207
+ else:
208
+ compilation_config_instance = compilation_config
209
+ else:
210
+ compilation_config_instance = None
211
+
212
+ engine_args = EngineArgs(
213
+ model=model,
214
+ task=task,
215
+ tokenizer=tokenizer,
216
+ tokenizer_mode=tokenizer_mode,
217
+ skip_tokenizer_init=skip_tokenizer_init,
218
+ trust_remote_code=trust_remote_code,
219
+ allowed_local_media_path=allowed_local_media_path,
220
+ tensor_parallel_size=tensor_parallel_size,
221
+ dtype=dtype,
222
+ quantization=quantization,
223
+ revision=revision,
224
+ tokenizer_revision=tokenizer_revision,
225
+ seed=seed,
226
+ gpu_memory_utilization=gpu_memory_utilization,
227
+ swap_space=swap_space,
228
+ cpu_offload_gb=cpu_offload_gb,
229
+ enforce_eager=enforce_eager,
230
+ max_seq_len_to_capture=max_seq_len_to_capture,
231
+ disable_custom_all_reduce=disable_custom_all_reduce,
232
+ disable_async_output_proc=disable_async_output_proc,
233
+ hf_overrides=hf_overrides,
234
+ mm_processor_kwargs=mm_processor_kwargs,
235
+ override_pooler_config=override_pooler_config,
236
+ compilation_config=compilation_config_instance,
237
+ **kwargs,
238
+ )
239
+ # Logic to switch between engines is done at runtime instead of import
240
+ # to avoid import order issues
241
+ self.engine_class = self.get_engine_class()
242
+ self.llm_engine = self.engine_class.from_engine_args(
243
+ engine_args, usage_context=UsageContext.LLM_CLASS)
244
+
245
+ self.request_counter = Counter()
246
+
247
+ @staticmethod
248
+ def get_engine_class() -> Type[LLMEngine]:
249
+ if envs.VLLM_USE_V1:
250
+ # Lazy import: the v1 package isn't distributed
251
+ from vllm.v1.engine.llm_engine import LLMEngine as V1LLMEngine
252
+ return V1LLMEngine # type: ignore
253
+ return LLMEngine
254
+
255
+ def get_tokenizer(self) -> AnyTokenizer:
256
+ return self.llm_engine.get_tokenizer_group(TokenizerGroup).tokenizer
257
+
258
+ def set_tokenizer(self, tokenizer: AnyTokenizer) -> None:
259
+ tokenizer_group = self.llm_engine.get_tokenizer_group(TokenizerGroup)
260
+
261
+ # While CachedTokenizer is dynamic, have no choice but
262
+ # compare class name. Misjudgment will arise from
263
+ # user-defined tokenizer started with 'Cached'
264
+ if tokenizer.__class__.__name__.startswith("Cached"):
265
+ tokenizer_group.tokenizer = tokenizer
266
+ else:
267
+ tokenizer_group.tokenizer = get_cached_tokenizer(tokenizer)
268
+
269
+ def get_default_sampling_params(self) -> SamplingParams:
270
+ diff_sampling_param = (
271
+ self.llm_engine.model_config.get_diff_sampling_param())
272
+ if diff_sampling_param:
273
+ return SamplingParams.from_optional(**diff_sampling_param)
274
+ return SamplingParams()
275
+
276
+ @overload
277
+ def generate(
278
+ self,
279
+ prompts: Union[PromptType, Sequence[PromptType]],
280
+ /,
281
+ sampling_params: Optional[Union[SamplingParams,
282
+ Sequence[SamplingParams]]] = None,
283
+ *,
284
+ use_tqdm: bool = True,
285
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
286
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
287
+ guided_options_request: Optional[Union[LLMGuidedOptions,
288
+ GuidedDecodingRequest]] = None,
289
+ ) -> List[RequestOutput]:
290
+ ...
291
+
292
+ @overload # LEGACY: single (prompt + optional token ids)
293
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
294
+ def generate(
295
+ self,
296
+ prompts: str,
297
+ sampling_params: Optional[Union[SamplingParams,
298
+ List[SamplingParams]]] = None,
299
+ prompt_token_ids: Optional[List[int]] = None,
300
+ use_tqdm: bool = True,
301
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
302
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
303
+ guided_options_request: Optional[Union[LLMGuidedOptions,
304
+ GuidedDecodingRequest]] = None,
305
+ ) -> List[RequestOutput]:
306
+ ...
307
+
308
+ @overload # LEGACY: multi (prompt + optional token ids)
309
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
310
+ def generate(
311
+ self,
312
+ prompts: List[str],
313
+ sampling_params: Optional[Union[SamplingParams,
314
+ List[SamplingParams]]] = None,
315
+ prompt_token_ids: Optional[List[List[int]]] = None,
316
+ use_tqdm: bool = True,
317
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
318
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
319
+ guided_options_request: Optional[Union[LLMGuidedOptions,
320
+ GuidedDecodingRequest]] = None,
321
+ ) -> List[RequestOutput]:
322
+ ...
323
+
324
+ @overload # LEGACY: single (token ids + optional prompt)
325
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
326
+ def generate(
327
+ self,
328
+ prompts: Optional[str] = None,
329
+ sampling_params: Optional[Union[SamplingParams,
330
+ List[SamplingParams]]] = None,
331
+ *,
332
+ prompt_token_ids: List[int],
333
+ use_tqdm: bool = True,
334
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
335
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
336
+ guided_options_request: Optional[Union[LLMGuidedOptions,
337
+ GuidedDecodingRequest]] = None,
338
+ ) -> List[RequestOutput]:
339
+ ...
340
+
341
+ @overload # LEGACY: multi (token ids + optional prompt)
342
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
343
+ def generate(
344
+ self,
345
+ prompts: Optional[List[str]] = None,
346
+ sampling_params: Optional[Union[SamplingParams,
347
+ List[SamplingParams]]] = None,
348
+ *,
349
+ prompt_token_ids: List[List[int]],
350
+ use_tqdm: bool = True,
351
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
352
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
353
+ guided_options_request: Optional[Union[LLMGuidedOptions,
354
+ GuidedDecodingRequest]] = None,
355
+ ) -> List[RequestOutput]:
356
+ ...
357
+
358
+ @overload # LEGACY: single or multi token ids [pos-only]
359
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
360
+ def generate(
361
+ self,
362
+ prompts: None,
363
+ sampling_params: None,
364
+ prompt_token_ids: Union[List[int], List[List[int]]],
365
+ use_tqdm: bool = True,
366
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
367
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
368
+ guided_options_request: Optional[Union[LLMGuidedOptions,
369
+ GuidedDecodingRequest]] = None,
370
+ ) -> List[RequestOutput]:
371
+ ...
372
+
373
+ @deprecate_kwargs(
374
+ "prompt_token_ids",
375
+ is_deprecated=lambda: LLM.DEPRECATE_LEGACY,
376
+ additional_message="Please use the 'prompts' parameter instead.",
377
+ )
378
+ def generate(
379
+ self,
380
+ prompts: Union[Union[PromptType, Sequence[PromptType]],
381
+ Optional[Union[str, List[str]]]] = None,
382
+ sampling_params: Optional[Union[SamplingParams,
383
+ Sequence[SamplingParams]]] = None,
384
+ prompt_token_ids: Optional[Union[List[int], List[List[int]]]] = None,
385
+ use_tqdm: bool = True,
386
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
387
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
388
+ guided_options_request: Optional[Union[LLMGuidedOptions,
389
+ GuidedDecodingRequest]] = None,
390
+ priority: Optional[List[int]] = None,
391
+ ) -> List[RequestOutput]:
392
+ """Generates the completions for the input prompts.
393
+
394
+ This class automatically batches the given prompts, considering
395
+ the memory constraint. For the best performance, put all of your prompts
396
+ into a single list and pass it to this method.
397
+
398
+ Args:
399
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
400
+ for batch inference. See :class:`~vllm.inputs.PromptType`
401
+ for more details about the format of each prompts.
402
+ sampling_params: The sampling parameters for text generation. If
403
+ None, we use the default sampling parameters.
404
+ When it is a single value, it is applied to every prompt.
405
+ When it is a list, the list must have the same length as the
406
+ prompts and it is paired one by one with the prompt.
407
+ use_tqdm: Whether to use tqdm to display the progress bar.
408
+ lora_request: LoRA request to use for generation, if any.
409
+ prompt_adapter_request: Prompt Adapter request to use for
410
+ generation, if any.
411
+ priority: The priority of the requests, if any.
412
+ Only applicable when priority scheduling policy is enabled.
413
+
414
+ Returns:
415
+ A list of ``RequestOutput`` objects containing the
416
+ generated completions in the same order as the input prompts.
417
+
418
+ Note:
419
+ Using ``prompts`` and ``prompt_token_ids`` as keyword parameters is
420
+ considered legacy and may be deprecated in the future. You should
421
+ instead pass them via the ``inputs`` parameter.
422
+ """
423
+ runner_type = self.llm_engine.model_config.runner_type
424
+ if runner_type not in ["generate", "transcription"]:
425
+ messages = [
426
+ "LLM.generate() is only supported for (conditional) generation "
427
+ "models (XForCausalLM, XForConditionalGeneration).",
428
+ ]
429
+
430
+ supported_runner_types = self.llm_engine.model_config \
431
+ .supported_runner_types
432
+ if "generate" in supported_runner_types:
433
+ messages.append(
434
+ "Your model supports the 'generate' runner, but is "
435
+ f"currently initialized for the '{runner_type}' runner. "
436
+ "Please initialize vLLM using `--task generate`.")
437
+
438
+ raise ValueError(" ".join(messages))
439
+
440
+ if prompt_token_ids is not None:
441
+ parsed_prompts = self._convert_v1_inputs(
442
+ prompts=cast(Optional[Union[str, List[str]]], prompts),
443
+ prompt_token_ids=prompt_token_ids,
444
+ )
445
+ else:
446
+ parsed_prompts = cast(Union[PromptType, Sequence[PromptType]],
447
+ prompts)
448
+
449
+ if isinstance(guided_options_request, dict):
450
+ if len(guided_options_request) > 1:
451
+ raise ValueError(
452
+ "You can only use one guided decoding but multiple is "
453
+ f"specified: {guided_options_request}")
454
+ guided_options_request = GuidedDecodingRequest(
455
+ **guided_options_request)
456
+
457
+ if sampling_params is None:
458
+ # Use default sampling params.
459
+ sampling_params = self.get_default_sampling_params()
460
+
461
+ self._validate_and_add_requests(
462
+ prompts=parsed_prompts,
463
+ params=sampling_params,
464
+ lora_request=lora_request,
465
+ prompt_adapter_request=prompt_adapter_request,
466
+ guided_options=guided_options_request,
467
+ priority=priority)
468
+
469
+ outputs = self._run_engine(use_tqdm=use_tqdm)
470
+ return self.engine_class.validate_outputs(outputs, RequestOutput)
471
+
472
+ def collective_rpc(self,
473
+ method: Union[str, Callable[..., _R]],
474
+ timeout: Optional[float] = None,
475
+ args: Tuple = (),
476
+ kwargs: Optional[Dict[str, Any]] = None) -> List[_R]:
477
+ """
478
+ Execute an RPC call on all workers.
479
+
480
+ Args:
481
+ method: Name of the worker method to execute, or a callable that
482
+ is serialized and sent to all workers to execute.
483
+
484
+ If the method is a callable, it should accept an additional
485
+ `self` argument, in addition to the arguments passed in `args`
486
+ and `kwargs`. The `self` argument will be the worker object.
487
+ timeout: Maximum time in seconds to wait for execution. Raises a
488
+ :exc:`TimeoutError` on timeout. `None` means wait indefinitely.
489
+ args: Positional arguments to pass to the worker method.
490
+ kwargs: Keyword arguments to pass to the worker method.
491
+
492
+ Returns:
493
+ A list containing the results from each worker.
494
+
495
+ Note:
496
+ It is recommended to use this API to only pass control messages,
497
+ and set up data-plane communication to pass data.
498
+ """
499
+ executor = self.llm_engine.model_executor
500
+ return executor.collective_rpc(method, timeout, args, kwargs)
501
+
502
+ def apply_model(self, func: Callable[[nn.Module], _R]) -> list[_R]:
503
+ """
504
+ Run a function directly on the model inside each worker,
505
+ returning the result for each of them.
506
+ """
507
+ executor = self.llm_engine.model_executor
508
+ return executor.apply_model(func)
509
+
510
+ def beam_search(
511
+ self,
512
+ prompts: List[Union[TokensPrompt, TextPrompt]],
513
+ params: BeamSearchParams,
514
+ ) -> List[BeamSearchOutput]:
515
+ """
516
+ Generate sequences using beam search.
517
+
518
+ Args:
519
+ prompts: A list of prompts. Each prompt can be a string or a list
520
+ of token IDs.
521
+ params: The beam search parameters.
522
+
523
+ TODO: how does beam search work together with length penalty, frequency
524
+ penalty, and stopping criteria, etc.?
525
+ """
526
+
527
+ beam_width = params.beam_width
528
+ max_tokens = params.max_tokens
529
+ temperature = params.temperature
530
+ ignore_eos = params.ignore_eos
531
+ length_penalty = params.length_penalty
532
+
533
+ def sort_beams_key(x: BeamSearchSequence) -> float:
534
+ return get_beam_search_score(x.tokens, x.cum_logprob,
535
+ tokenizer.eos_token_id,
536
+ length_penalty)
537
+
538
+ tokenizer = self.get_tokenizer()
539
+ # generate 2 * beam_width candidates at each step
540
+ # following the huggingface transformers implementation
541
+ # at https://github.com/huggingface/transformers/blob/e15687fffe5c9d20598a19aeab721ae0a7580f8a/src/transformers/generation/beam_search.py#L534 # noqa
542
+ beam_search_params = SamplingParams(logprobs=2 * beam_width,
543
+ max_tokens=1,
544
+ temperature=temperature)
545
+ instances: List[BeamSearchInstance] = []
546
+
547
+ for prompt in prompts:
548
+ if is_token_prompt(prompt):
549
+ prompt_tokens = prompt["prompt_token_ids"]
550
+ else:
551
+ prompt_tokens = tokenizer.encode(prompt["prompt"])
552
+ instances.append(BeamSearchInstance(prompt_tokens))
553
+
554
+ for _ in range(max_tokens):
555
+ all_beams: List[BeamSearchSequence] = list(
556
+ sum((instance.beams for instance in instances), []))
557
+ pos = [0] + list(
558
+ itertools.accumulate(
559
+ len(instance.beams) for instance in instances))
560
+ instance_start_and_end: List[Tuple[int, int]] = list(
561
+ zip(pos[:-1], pos[1:]))
562
+
563
+ if len(all_beams) == 0:
564
+ break
565
+
566
+ prompts_batch = [
567
+ TokensPrompt(prompt_token_ids=beam.tokens)
568
+ for beam in all_beams
569
+ ]
570
+
571
+ # only runs for one step
572
+ # we don't need to use tqdm here
573
+ output = self.generate(prompts_batch,
574
+ sampling_params=beam_search_params,
575
+ use_tqdm=False)
576
+
577
+ for (start, end), instance in zip(instance_start_and_end,
578
+ instances):
579
+ instance_new_beams = []
580
+ for i in range(start, end):
581
+ current_beam = all_beams[i]
582
+ result = output[i]
583
+
584
+ if result.outputs[0].logprobs is not None:
585
+ # if `result.outputs[0].logprobs` is None, it means
586
+ # the sequence is completed because of the max-model-len
587
+ # or abortion. we don't need to add it to the new beams.
588
+ logprobs = result.outputs[0].logprobs[0]
589
+ for token_id, logprob_obj in logprobs.items():
590
+ new_beam = BeamSearchSequence(
591
+ tokens=current_beam.tokens + [token_id],
592
+ logprobs=current_beam.logprobs + [logprobs],
593
+ cum_logprob=current_beam.cum_logprob +
594
+ logprob_obj.logprob)
595
+
596
+ if token_id == tokenizer.eos_token_id and \
597
+ not ignore_eos:
598
+ instance.completed.append(new_beam)
599
+ else:
600
+ instance_new_beams.append(new_beam)
601
+ sorted_beams = sorted(instance_new_beams,
602
+ key=sort_beams_key,
603
+ reverse=True)
604
+ instance.beams = sorted_beams[:beam_width]
605
+
606
+ outputs = []
607
+ for instance in instances:
608
+ instance.completed.extend(instance.beams)
609
+ sorted_completed = sorted(instance.completed,
610
+ key=sort_beams_key,
611
+ reverse=True)
612
+ best_beams = sorted_completed[:beam_width]
613
+
614
+ for beam in best_beams:
615
+ beam.text = tokenizer.decode(beam.tokens)
616
+ outputs.append(BeamSearchOutput(sequences=best_beams))
617
+
618
+ return outputs
619
+
620
+ def chat(
621
+ self,
622
+ messages: Union[List[ChatCompletionMessageParam],
623
+ List[List[ChatCompletionMessageParam]]],
624
+ sampling_params: Optional[Union[SamplingParams,
625
+ List[SamplingParams]]] = None,
626
+ use_tqdm: bool = True,
627
+ lora_request: Optional[LoRARequest] = None,
628
+ chat_template: Optional[str] = None,
629
+ chat_template_content_format: ChatTemplateContentFormatOption = "auto",
630
+ add_generation_prompt: bool = True,
631
+ continue_final_message: bool = False,
632
+ tools: Optional[List[Dict[str, Any]]] = None,
633
+ mm_processor_kwargs: Optional[Dict[str, Any]] = None,
634
+ ) -> List[RequestOutput]:
635
+ """
636
+ Generate responses for a chat conversation.
637
+
638
+ The chat conversation is converted into a text prompt using the
639
+ tokenizer, and the :meth:`generate` method is called to generate the
640
+ responses.
641
+
642
+ Multi-modal inputs can be passed in the same way you would pass them
643
+ to the OpenAI API.
644
+
645
+ Args:
646
+ messages: A list of conversations or a single conversation.
647
+
648
+ - Each conversation is represented as a list of messages.
649
+ - Each message is a dictionary with 'role' and 'content' keys.
650
+
651
+ sampling_params: The sampling parameters for text generation.
652
+ If None, we use the default sampling parameters. When it
653
+ is a single value, it is applied to every prompt. When it
654
+ is a list, the list must have the same length as the
655
+ prompts, and each element is paired with the corresponding prompt.
656
+ use_tqdm: Whether to use tqdm to display the progress bar.
657
+ lora_request: LoRA request to use for generation, if any.
658
+ chat_template: The template to use for structuring the chat.
659
+ If not provided, the model's default chat template will be used.
660
+ chat_template_content_format: The format to render message content.
661
+
662
+ - "string" will render the content as a string.
663
+ Example: ``"Who are you?"``
664
+ - "openai" will render the content as a list of dictionaries,
665
+ similar to OpenAI schema.
666
+ Example: ``[{"type": "text", "text": "Who are you?"}]``
667
+
668
+ add_generation_prompt: If True, adds a generation template
669
+ to each message.
670
+ continue_final_message: If True, continues the final message in
671
+ the conversation instead of starting a new one. Cannot be
672
+ ``True`` if ``add_generation_prompt`` is also ``True``.
673
+ mm_processor_kwargs: Multimodal processor kwarg overrides for this
674
+ chat request. Only used for offline requests.
675
+
676
+ Returns:
677
+ A list of ``RequestOutput`` objects containing the generated
678
+ responses in the same order as the input messages.
679
+ """
680
+ list_of_messages: List[List[ChatCompletionMessageParam]]
681
+
682
+ # Handle multi and single conversations
683
+ if is_list_of(messages, list):
684
+ # messages is List[List[...]]
685
+ list_of_messages = cast(List[List[ChatCompletionMessageParam]],
686
+ messages)
687
+ else:
688
+ # messages is List[...]
689
+ list_of_messages = [
690
+ cast(List[ChatCompletionMessageParam], messages)
691
+ ]
692
+
693
+ tokenizer = self.get_tokenizer()
694
+ model_config = self.llm_engine.get_model_config()
695
+ resolved_content_format = resolve_chat_template_content_format(
696
+ chat_template,
697
+ chat_template_content_format,
698
+ tokenizer,
699
+ )
700
+
701
+ prompts: List[Union[TokensPrompt, TextPrompt]] = []
702
+
703
+ for msgs in list_of_messages:
704
+ # NOTE: _parse_chat_message_content_parts() currently doesn't
705
+ # handle mm_processor_kwargs, since there is no implementation in
706
+ # the chat message parsing for it.
707
+ conversation, mm_data = parse_chat_messages(
708
+ msgs,
709
+ model_config,
710
+ tokenizer,
711
+ content_format=resolved_content_format,
712
+ )
713
+
714
+ prompt_data: Union[str, List[int]]
715
+ if isinstance(tokenizer, MistralTokenizer):
716
+ prompt_data = apply_mistral_chat_template(
717
+ tokenizer,
718
+ messages=msgs,
719
+ chat_template=chat_template,
720
+ add_generation_prompt=add_generation_prompt,
721
+ continue_final_message=continue_final_message,
722
+ tools=tools,
723
+ )
724
+ else:
725
+ prompt_data = apply_hf_chat_template(
726
+ tokenizer,
727
+ conversation=conversation,
728
+ chat_template=chat_template,
729
+ add_generation_prompt=add_generation_prompt,
730
+ continue_final_message=continue_final_message,
731
+ tools=tools,
732
+ )
733
+
734
+ prompt: Union[TokensPrompt, TextPrompt]
735
+ if is_list_of(prompt_data, int):
736
+ prompt = TokensPrompt(prompt_token_ids=prompt_data)
737
+ else:
738
+ prompt = TextPrompt(prompt=prompt_data)
739
+
740
+ if mm_data is not None:
741
+ prompt["multi_modal_data"] = mm_data
742
+
743
+ if mm_processor_kwargs is not None:
744
+ prompt["mm_processor_kwargs"] = mm_processor_kwargs
745
+
746
+ prompts.append(prompt)
747
+
748
+ return self.generate(
749
+ prompts,
750
+ sampling_params=sampling_params,
751
+ use_tqdm=use_tqdm,
752
+ lora_request=lora_request,
753
+ )
754
+
755
+ @overload
756
+ def encode(
757
+ self,
758
+ prompts: Union[PromptType, Sequence[PromptType]],
759
+ /,
760
+ pooling_params: Optional[Union[PoolingParams,
761
+ Sequence[PoolingParams]]] = None,
762
+ *,
763
+ use_tqdm: bool = True,
764
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
765
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
766
+ ) -> List[PoolingRequestOutput]:
767
+ ...
768
+
769
+ @overload # LEGACY: single (prompt + optional token ids)
770
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
771
+ def encode(
772
+ self,
773
+ prompts: str,
774
+ pooling_params: Optional[Union[PoolingParams,
775
+ Sequence[PoolingParams]]] = None,
776
+ prompt_token_ids: Optional[List[int]] = None,
777
+ use_tqdm: bool = True,
778
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
779
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
780
+ ) -> List[PoolingRequestOutput]:
781
+ ...
782
+
783
+ @overload # LEGACY: multi (prompt + optional token ids)
784
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
785
+ def encode(
786
+ self,
787
+ prompts: List[str],
788
+ pooling_params: Optional[Union[PoolingParams,
789
+ Sequence[PoolingParams]]] = None,
790
+ prompt_token_ids: Optional[List[List[int]]] = None,
791
+ use_tqdm: bool = True,
792
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
793
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
794
+ ) -> List[PoolingRequestOutput]:
795
+ ...
796
+
797
+ @overload # LEGACY: single (token ids + optional prompt)
798
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
799
+ def encode(
800
+ self,
801
+ prompts: Optional[str] = None,
802
+ pooling_params: Optional[Union[PoolingParams,
803
+ Sequence[PoolingParams]]] = None,
804
+ *,
805
+ prompt_token_ids: List[int],
806
+ use_tqdm: bool = True,
807
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
808
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
809
+ ) -> List[PoolingRequestOutput]:
810
+ ...
811
+
812
+ @overload # LEGACY: multi (token ids + optional prompt)
813
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
814
+ def encode(
815
+ self,
816
+ prompts: Optional[List[str]] = None,
817
+ pooling_params: Optional[Union[PoolingParams,
818
+ Sequence[PoolingParams]]] = None,
819
+ *,
820
+ prompt_token_ids: List[List[int]],
821
+ use_tqdm: bool = True,
822
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
823
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
824
+ ) -> List[PoolingRequestOutput]:
825
+ ...
826
+
827
+ @overload # LEGACY: single or multi token ids [pos-only]
828
+ @deprecated("'prompt_token_ids' will become part of 'prompts'")
829
+ def encode(
830
+ self,
831
+ prompts: None,
832
+ pooling_params: None,
833
+ prompt_token_ids: Union[List[int], List[List[int]]],
834
+ use_tqdm: bool = True,
835
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
836
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
837
+ ) -> List[PoolingRequestOutput]:
838
+ ...
839
+
840
+ @deprecate_kwargs(
841
+ "prompt_token_ids",
842
+ is_deprecated=lambda: LLM.DEPRECATE_LEGACY,
843
+ additional_message="Please use the 'prompts' parameter instead.",
844
+ )
845
+ def encode(
846
+ self,
847
+ prompts: Union[Union[PromptType, Sequence[PromptType]],
848
+ Optional[Union[str, List[str]]]] = None,
849
+ pooling_params: Optional[Union[PoolingParams,
850
+ Sequence[PoolingParams]]] = None,
851
+ prompt_token_ids: Optional[Union[List[int], List[List[int]]]] = None,
852
+ use_tqdm: bool = True,
853
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
854
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
855
+ ) -> List[PoolingRequestOutput]:
856
+ """Apply pooling to the hidden states corresponding to the input
857
+ prompts.
858
+
859
+ This class automatically batches the given prompts, considering
860
+ the memory constraint. For the best performance, put all of your prompts
861
+ into a single list and pass it to this method.
862
+
863
+ Args:
864
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
865
+ for batch inference. See :class:`~vllm.inputs.PromptType`
866
+ for more details about the format of each prompt.
867
+ pooling_params: The pooling parameters for pooling. If None, we
868
+ use the default pooling parameters.
869
+ use_tqdm: Whether to use tqdm to display the progress bar.
870
+ lora_request: LoRA request to use for generation, if any.
871
+ prompt_adapter_request: Prompt Adapter request to use for
872
+ generation, if any.
873
+
874
+ Returns:
875
+ A list of ``PoolingRequestOutput`` objects containing the
876
+ pooled hidden states in the same order as the input prompts.
877
+
878
+ Note:
879
+ Using ``prompts`` and ``prompt_token_ids`` as keyword parameters is
880
+ considered legacy and may be deprecated in the future. You should
881
+ instead pass them via the ``inputs`` parameter.
882
+ """
883
+ runner_type = self.llm_engine.model_config.runner_type
884
+ if runner_type != "pooling":
885
+ messages = ["LLM.encode() is only supported for pooling models."]
886
+
887
+ supported_runner_types = self.llm_engine.model_config \
888
+ .supported_runner_types
889
+ if "pooling" in supported_runner_types:
890
+ messages.append(
891
+ "Your model supports the 'pooling' runner, but is "
892
+ f"currently initialized for the '{runner_type}' runner. "
893
+ "Please initialize vLLM using `--task embed`, "
894
+ "`--task classify`, `--task score` etc.")
895
+
896
+ raise ValueError(" ".join(messages))
897
+
898
+ if prompt_token_ids is not None:
899
+ parsed_prompts = self._convert_v1_inputs(
900
+ prompts=cast(Optional[Union[str, List[str]]], prompts),
901
+ prompt_token_ids=prompt_token_ids,
902
+ )
903
+ else:
904
+ parsed_prompts = cast(Union[PromptType, Sequence[PromptType]],
905
+ prompts)
906
+
907
+ if pooling_params is None:
908
+ # Use default pooling params.
909
+ pooling_params = PoolingParams()
910
+
911
+ self._validate_and_add_requests(
912
+ prompts=parsed_prompts,
913
+ params=pooling_params,
914
+ lora_request=lora_request,
915
+ prompt_adapter_request=prompt_adapter_request,
916
+ )
917
+
918
+ outputs = self._run_engine(use_tqdm=use_tqdm)
919
+ return self.engine_class.validate_outputs(outputs,
920
+ PoolingRequestOutput)
921
+
922
+ def embed(
923
+ self,
924
+ prompts: Union[PromptType, Sequence[PromptType]],
925
+ /,
926
+ *,
927
+ use_tqdm: bool = True,
928
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
929
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
930
+ ) -> List[EmbeddingRequestOutput]:
931
+ """
932
+ Generate an embedding vector for each prompt.
933
+
934
+ This class automatically batches the given prompts, considering
935
+ the memory constraint. For the best performance, put all of your prompts
936
+ into a single list and pass it to this method.
937
+
938
+ Args:
939
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
940
+ for batch inference. See :class:`~vllm.inputs.PromptType`
941
+ for more details about the format of each prompt.
942
+ use_tqdm: Whether to use tqdm to display the progress bar.
943
+ lora_request: LoRA request to use for generation, if any.
944
+ prompt_adapter_request: Prompt Adapter request to use for
945
+ generation, if any.
946
+
947
+ Returns:
948
+ A list of ``EmbeddingRequestOutput`` objects containing the
949
+ embedding vectors in the same order as the input prompts.
950
+ """
951
+ if self.llm_engine.model_config.task != "embed":
952
+ raise ValueError(
953
+ "Embedding API is only enabled for `--task embed`")
954
+
955
+ items = self.encode(prompts,
956
+ use_tqdm=use_tqdm,
957
+ lora_request=lora_request,
958
+ prompt_adapter_request=prompt_adapter_request)
959
+
960
+ return [EmbeddingRequestOutput.from_base(item) for item in items]
961
+
962
+ def classify(
963
+ self,
964
+ prompts: Union[PromptType, Sequence[PromptType]],
965
+ /,
966
+ *,
967
+ use_tqdm: bool = True,
968
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
969
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
970
+ ) -> List[ClassificationRequestOutput]:
971
+ """
972
+ Generate class logits for each prompt.
973
+
974
+ This class automatically batches the given prompts, considering
975
+ the memory constraint. For the best performance, put all of your prompts
976
+ into a single list and pass it to this method.
977
+
978
+ Args:
979
+ prompts: The prompts to the LLM. You may pass a sequence of prompts
980
+ for batch inference. See :class:`~vllm.inputs.PromptType`
981
+ for more details about the format of each prompt.
982
+ use_tqdm: Whether to use tqdm to display the progress bar.
983
+ lora_request: LoRA request to use for generation, if any.
984
+ prompt_adapter_request: Prompt Adapter request to use for
985
+ generation, if any.
986
+
987
+ Returns:
988
+ A list of ``ClassificationRequestOutput`` objects containing the
989
+ class logits in the same order as the input prompts.
990
+ """
991
+ if self.llm_engine.model_config.task != "classify":
992
+ raise ValueError(
993
+ "Classification API is only enabled for `--task classify`")
994
+
995
+ items = self.encode(prompts,
996
+ use_tqdm=use_tqdm,
997
+ lora_request=lora_request,
998
+ prompt_adapter_request=prompt_adapter_request)
999
+
1000
+ return [ClassificationRequestOutput.from_base(item) for item in items]
1001
+
1002
+ def _embedding_score(
1003
+ self,
1004
+ tokenizer: AnyTokenizer,
1005
+ text_1: List[Union[str, TextPrompt, TokensPrompt]],
1006
+ text_2: List[Union[str, TextPrompt, TokensPrompt]],
1007
+ truncate_prompt_tokens: Optional[int] = None,
1008
+ use_tqdm: bool = True,
1009
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
1010
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1011
+ ) -> List[ScoringRequestOutput]:
1012
+
1013
+ encoded_output = self.encode(
1014
+ text_1 + text_2,
1015
+ use_tqdm=use_tqdm,
1016
+ lora_request=lora_request,
1017
+ prompt_adapter_request=prompt_adapter_request)
1018
+ encoded_output_1 = encoded_output[0:len(text_1)]
1019
+ encoded_output_2 = encoded_output[len(text_1):]
1020
+
1021
+ if len(encoded_output_1) == 1:
1022
+ encoded_output_1 = encoded_output_1 * len(encoded_output_2)
1023
+
1024
+ output_pairs = [(t1, t2)
1025
+ for t1, t2 in zip(encoded_output_1, encoded_output_2)]
1026
+
1027
+ scores = []
1028
+ scorer = torch.nn.CosineSimilarity(0)
1029
+
1030
+ for embed_1, embed_2 in output_pairs:
1031
+ pair_score = scorer(embed_1.outputs.data, embed_2.outputs.data)
1032
+
1033
+ if (pad_token_id := getattr(tokenizer, "pad_token_id",
1034
+ None)) is not None:
1035
+ tokens = embed_1.prompt_token_ids + [
1036
+ pad_token_id
1037
+ ] + embed_2.prompt_token_ids
1038
+ else:
1039
+ tokens = embed_1.prompt_token_ids + embed_2.prompt_token_ids
1040
+
1041
+ scores.append(
1042
+ PoolingRequestOutput(
1043
+ request_id=f"{embed_1.request_id}_{embed_2.request_id}",
1044
+ outputs=pair_score,
1045
+ prompt_token_ids=tokens,
1046
+ finished=True))
1047
+
1048
+ items = self.engine_class.validate_outputs(scores,
1049
+ PoolingRequestOutput)
1050
+ return [ScoringRequestOutput.from_base(item) for item in items]
1051
+
1052
+ def _cross_encoding_score(
1053
+ self,
1054
+ tokenizer: AnyTokenizer,
1055
+ text_1: List[str],
1056
+ text_2: List[str],
1057
+ truncate_prompt_tokens: Optional[int] = None,
1058
+ use_tqdm: bool = True,
1059
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
1060
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1061
+ ) -> List[ScoringRequestOutput]:
1062
+
1063
+ if isinstance(tokenizer, MistralTokenizer):
1064
+ raise ValueError(
1065
+ "Score API is only enabled for `--task embed or score`")
1066
+
1067
+ if len(text_1) == 1:
1068
+ text_1 = text_1 * len(text_2)
1069
+
1070
+ input_pairs = [(t1, t2) for t1, t2 in zip(text_1, text_2)]
1071
+
1072
+ pooling_params = PoolingParams()
1073
+
1074
+ tokenization_kwargs: Dict[str, Any] = {}
1075
+ if truncate_prompt_tokens is not None:
1076
+ tokenization_kwargs["truncation"] = True
1077
+ tokenization_kwargs["max_length"] = truncate_prompt_tokens
1078
+
1079
+ parsed_prompts = []
1080
+
1081
+ for q, t in input_pairs:
1082
+ prompt_inputs = tokenizer(text=q,
1083
+ text_pair=t,
1084
+ **tokenization_kwargs)
1085
+ engine_prompt = TokensPrompt(
1086
+ prompt_token_ids=prompt_inputs["input_ids"],
1087
+ token_type_ids=prompt_inputs.get("token_type_ids"))
1088
+ parsed_prompts.append(engine_prompt)
1089
+
1090
+ self._validate_and_add_requests(
1091
+ prompts=parsed_prompts,
1092
+ params=pooling_params,
1093
+ lora_request=lora_request,
1094
+ prompt_adapter_request=prompt_adapter_request,
1095
+ )
1096
+
1097
+ outputs = self._run_engine(use_tqdm=use_tqdm)
1098
+ items = self.engine_class.validate_outputs(outputs,
1099
+ PoolingRequestOutput)
1100
+
1101
+ return [ScoringRequestOutput.from_base(item) for item in items]
1102
+
1103
+ def score(
1104
+ self,
1105
+ text_1: Union[SingletonPrompt, Sequence[SingletonPrompt]],
1106
+ text_2: Union[SingletonPrompt, Sequence[SingletonPrompt]],
1107
+ /,
1108
+ *,
1109
+ truncate_prompt_tokens: Optional[int] = None,
1110
+ use_tqdm: bool = True,
1111
+ lora_request: Optional[Union[List[LoRARequest], LoRARequest]] = None,
1112
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1113
+ ) -> List[ScoringRequestOutput]:
1114
+ """Generate similarity scores for all pairs ``<text,text_pair>``.
1115
+
1116
+ The inputs can be ``1 -> 1``, ``1 -> N`` or ``N -> N``.
1117
+ In the ``1 -> N`` case the ``text_1`` sentence will be replicated ``N``
1118
+ times to pair with the ``text_2`` sentences.
1119
+ The input pairs are used to build a list of prompts for the
1120
+ cross encoder model. This class automatically batches the prompts,
1121
+ considering the memory constraint. For the best performance, put all
1122
+ of your texts into a single list and pass it to this method.
1123
+
1124
+ Args:
1125
+ text_1: can be a single prompt or a list of prompts, in which
1126
+ case it has to have the same length as the ``text_2`` list
1127
+ text_2: The texts to pair with the query to form the input
1128
+ to the LLM. See :class:`~vllm.inputs.PromptType` for
1129
+ more details about the format of each prompt.
1130
+ use_tqdm: Whether to use tqdm to display the progress bar.
1131
+ lora_request: LoRA request to use for generation, if any.
1132
+ prompt_adapter_request: Prompt Adapter request to use for
1133
+ generation, if any.
1134
+
1135
+ Returns:
1136
+ A list of ``ScoringRequestOutput`` objects containing the
1137
+ generated scores in the same order as the input prompts.
1138
+ """
1139
+ runner_type = self.llm_engine.model_config.runner_type
1140
+ if runner_type != "pooling":
1141
+ messages = ["LLM.score() is only supported for pooling models."]
1142
+
1143
+ supported_runner_types = self.llm_engine.model_config \
1144
+ .supported_runner_types
1145
+ if "pooling" in supported_runner_types:
1146
+ messages.append(
1147
+ "Your model supports the 'pooling' runner, but is "
1148
+ f"currently initialized for the '{runner_type}' runner. "
1149
+ "Please initialize vLLM using `--task embed`, "
1150
+ "`--task classify`, `--task score` etc.")
1151
+
1152
+ raise ValueError(" ".join(messages))
1153
+
1154
+ if self.llm_engine.model_config.task not in ("embed", "score"):
1155
+ raise ValueError(
1156
+ "Score API is only enabled for `--task embed or --task score`")
1157
+
1158
+ # the tokenizer for models such as
1159
+ # "cross-encoder/ms-marco-MiniLM-L-6-v2" doesn't support passing
1160
+ # lists of tokens to the `text` and `text_pair` kwargs
1161
+ tokenizer = self.llm_engine.get_tokenizer()
1162
+
1163
+ def ensure_str(prompt: SingletonPrompt):
1164
+ if isinstance(prompt, dict):
1165
+ if "multi_modal_data" in prompt:
1166
+ raise ValueError("Multi-modal prompt is not "
1167
+ "supported for scoring")
1168
+ elif "prompt_token_ids" in prompt:
1169
+ prompt = tokenizer.decode(
1170
+ cast(TokensPrompt, prompt)["prompt_token_ids"])
1171
+ elif "prompt" in prompt:
1172
+ prompt = cast(TextPrompt, prompt)["prompt"]
1173
+ assert type(prompt) is str
1174
+ return prompt
1175
+
1176
+ if isinstance(text_1, (str, dict)):
1177
+ # Convert a single prompt to a list.
1178
+ text_1 = [text_1]
1179
+ input_text_1: List[str] = [ensure_str(t) for t in text_1]
1180
+
1181
+ if isinstance(text_2, (str, dict)):
1182
+ # Convert a single prompt to a list.
1183
+ text_2 = [text_2]
1184
+ input_text_2: List[str] = [ensure_str(t) for t in text_2]
1185
+
1186
+ if len(input_text_1) > 1 and len(input_text_1) != len(input_text_2):
1187
+ raise ValueError("Input lengths must be either 1:1, 1:N or N:N")
1188
+ if len(input_text_1) == 0:
1189
+ raise ValueError("At least one text element must be given")
1190
+ if len(input_text_2) == 0:
1191
+ raise ValueError("At least one text_pair element must be given")
1192
+
1193
+ if self.llm_engine.model_config.is_cross_encoder:
1194
+ return self._cross_encoding_score(tokenizer, input_text_1,
1195
+ input_text_2,
1196
+ truncate_prompt_tokens, use_tqdm,
1197
+ lora_request,
1198
+ prompt_adapter_request)
1199
+ else:
1200
+
1201
+ return self._embedding_score(
1202
+ tokenizer,
1203
+ input_text_1, # type: ignore[arg-type]
1204
+ input_text_2, # type: ignore[arg-type]
1205
+ truncate_prompt_tokens,
1206
+ use_tqdm,
1207
+ lora_request,
1208
+ prompt_adapter_request)
1209
+
1210
+ def start_profile(self) -> None:
1211
+ self.llm_engine.start_profile()
1212
+
1213
+ def stop_profile(self) -> None:
1214
+ self.llm_engine.stop_profile()
1215
+
1216
+ def reset_prefix_cache(self) -> bool:
1217
+ return self.llm_engine.reset_prefix_cache()
1218
+
1219
+ def sleep(self, level: int = 1):
1220
+ """
1221
+ Put the engine to sleep. The engine should not process any requests.
1222
+ The caller should guarantee that no requests are being processed
1223
+ during the sleep period, before `wake_up` is called.
1224
+
1225
+ :param level: The sleep level. Level 1 sleep will offload the model
1226
+ weights and discard the kv cache. The content of kv cache is
1227
+ forgotten. Level 1 sleep is good for sleeping and waking up the
1228
+ engine to run the same model again. The model weights are backed
1229
+ up in CPU memory. Please make sure there's enough CPU memory to
1230
+ store the model weights. Level 2 sleep will discard both the model
1231
+ weights and the kv cache. The content of both the model weights
1232
+ and kv cache is forgotten. Level 2 sleep is good for sleeping and
1233
+ waking up the engine to run a different model or update the model,
1234
+ where previous model weights are not needed. It reduces CPU memory
1235
+ pressure.
1236
+ """
1237
+ self.reset_prefix_cache()
1238
+ self.llm_engine.sleep(level=level)
1239
+
1240
+ def wake_up(self):
1241
+ """
1242
+ Wake up the engine from sleep mode. See the :meth:`sleep` method
1243
+ for more details."""
1244
+ self.llm_engine.wake_up()
1245
+
1246
+ # LEGACY
1247
+ def _convert_v1_inputs(
1248
+ self,
1249
+ prompts: Optional[Union[str, List[str]]],
1250
+ prompt_token_ids: Optional[Union[List[int], List[List[int]]]],
1251
+ ):
1252
+ # skip_tokenizer_init is now checked in engine
1253
+
1254
+ if prompts is not None:
1255
+ prompts = [p["content"] for p in parse_and_batch_prompt(prompts)]
1256
+ if prompt_token_ids is not None:
1257
+ prompt_token_ids = [
1258
+ p["content"] for p in parse_and_batch_prompt(prompt_token_ids)
1259
+ ]
1260
+
1261
+ num_requests = None
1262
+ if prompts is not None:
1263
+ num_requests = len(prompts)
1264
+ if prompt_token_ids is not None:
1265
+ if (num_requests is not None
1266
+ and num_requests != len(prompt_token_ids)):
1267
+ raise ValueError("The lengths of prompts and prompt_token_ids "
1268
+ "must be the same.")
1269
+
1270
+ num_requests = len(prompt_token_ids)
1271
+ if num_requests is None:
1272
+ raise ValueError("Either prompts or prompt_token_ids must be "
1273
+ "provided.")
1274
+
1275
+ parsed_prompts: List[PromptType] = []
1276
+ for i in range(num_requests):
1277
+ item: PromptType
1278
+
1279
+ if prompts is not None:
1280
+ item = TextPrompt(prompt=prompts[i])
1281
+ elif prompt_token_ids is not None:
1282
+ item = TokensPrompt(prompt_token_ids=prompt_token_ids[i])
1283
+ else:
1284
+ raise AssertionError
1285
+
1286
+ parsed_prompts.append(item)
1287
+
1288
+ return parsed_prompts
1289
+
1290
+ def _validate_and_add_requests(
1291
+ self,
1292
+ prompts: Union[PromptType, Sequence[PromptType]],
1293
+ params: Union[SamplingParams, Sequence[SamplingParams], PoolingParams,
1294
+ Sequence[PoolingParams]],
1295
+ lora_request: Optional[Union[Sequence[LoRARequest], LoRARequest]],
1296
+ prompt_adapter_request: Optional[PromptAdapterRequest],
1297
+ guided_options: Optional[GuidedDecodingRequest] = None,
1298
+ priority: Optional[List[int]] = None,
1299
+ ) -> None:
1300
+ if guided_options is not None:
1301
+ warnings.warn(
1302
+ "guided_options_request is deprecated, use "
1303
+ "SamplingParams.guided_decoding instead",
1304
+ DeprecationWarning,
1305
+ stacklevel=2,
1306
+ )
1307
+
1308
+ if isinstance(prompts, (str, dict)):
1309
+ # Convert a single prompt to a list.
1310
+ prompts = [prompts]
1311
+
1312
+ num_requests = len(prompts)
1313
+ if isinstance(params, list) and len(params) != num_requests:
1314
+ raise ValueError("The lengths of prompts and params "
1315
+ "must be the same.")
1316
+ if isinstance(lora_request,
1317
+ list) and len(lora_request) != num_requests:
1318
+ raise ValueError("The lengths of prompts and lora_request "
1319
+ "must be the same.")
1320
+
1321
+ for sp in params if isinstance(params, list) else (params, ):
1322
+ if isinstance(sp, SamplingParams):
1323
+ self._add_guided_params(sp, guided_options)
1324
+
1325
+ # We only care about the final output
1326
+ sp.output_kind = RequestOutputKind.FINAL_ONLY
1327
+
1328
+ # Add requests to the engine.
1329
+ for i, prompt in enumerate(prompts):
1330
+ self._add_request(
1331
+ prompt,
1332
+ params[i] if isinstance(params, Sequence) else params,
1333
+ lora_request=lora_request[i] if isinstance(
1334
+ lora_request, Sequence) else lora_request,
1335
+ prompt_adapter_request=prompt_adapter_request,
1336
+ priority=priority[i] if priority else 0,
1337
+ )
1338
+
1339
+ def _add_request(
1340
+ self,
1341
+ prompt: PromptType,
1342
+ params: Union[SamplingParams, PoolingParams],
1343
+ lora_request: Optional[LoRARequest] = None,
1344
+ prompt_adapter_request: Optional[PromptAdapterRequest] = None,
1345
+ priority: int = 0,
1346
+ ) -> None:
1347
+ request_id = str(next(self.request_counter))
1348
+ self.llm_engine.add_request(
1349
+ request_id,
1350
+ prompt,
1351
+ params,
1352
+ lora_request=lora_request,
1353
+ prompt_adapter_request=prompt_adapter_request,
1354
+ priority=priority,
1355
+ )
1356
+
1357
+ def _add_guided_params(
1358
+ self,
1359
+ params: SamplingParams,
1360
+ guided_options: Optional[GuidedDecodingRequest] = None):
1361
+ if guided_options is None:
1362
+ return params
1363
+
1364
+ if params.guided_decoding is not None:
1365
+ raise ValueError("Cannot set both guided_options_request and "
1366
+ "params.guided_decoding.")
1367
+
1368
+ params.guided_decoding = GuidedDecodingParams(
1369
+ json=guided_options.guided_json,
1370
+ regex=guided_options.guided_regex,
1371
+ choice=guided_options.guided_choice,
1372
+ grammar=guided_options.guided_grammar,
1373
+ json_object=guided_options.guided_json_object,
1374
+ backend=guided_options.guided_decoding_backend,
1375
+ whitespace_pattern=guided_options.guided_whitespace_pattern)
1376
+ return params
1377
+
1378
+ def _run_engine(
1379
+ self, *, use_tqdm: bool
1380
+ ) -> List[Union[RequestOutput, PoolingRequestOutput]]:
1381
+ # Initialize tqdm.
1382
+ if use_tqdm:
1383
+ num_requests = self.llm_engine.get_num_unfinished_requests()
1384
+ pbar = tqdm(
1385
+ total=num_requests,
1386
+ desc="Processed prompts",
1387
+ dynamic_ncols=True,
1388
+ postfix=(f"est. speed input: {0:.2f} toks/s, "
1389
+ f"output: {0:.2f} toks/s"),
1390
+ )
1391
+
1392
+ # Run the engine.
1393
+ outputs: List[Union[RequestOutput, PoolingRequestOutput]] = []
1394
+ total_in_toks = 0
1395
+ total_out_toks = 0
1396
+ while self.llm_engine.has_unfinished_requests():
1397
+ step_outputs = self.llm_engine.step()
1398
+ for output in step_outputs:
1399
+ if output.finished:
1400
+ outputs.append(output)
1401
+ if use_tqdm:
1402
+ if isinstance(output, RequestOutput):
1403
+ # Calculate tokens only for RequestOutput
1404
+ assert output.prompt_token_ids is not None
1405
+ total_in_toks += len(output.prompt_token_ids)
1406
+ in_spd = total_in_toks / pbar.format_dict["elapsed"]
1407
+ total_out_toks += sum(
1408
+ len(stp.token_ids) for stp in output.outputs)
1409
+ out_spd = (total_out_toks /
1410
+ pbar.format_dict["elapsed"])
1411
+ pbar.postfix = (
1412
+ f"est. speed input: {in_spd:.2f} toks/s, "
1413
+ f"output: {out_spd:.2f} toks/s")
1414
+ pbar.update(1)
1415
+
1416
+ if use_tqdm:
1417
+ pbar.close()
1418
+ # Sort the outputs by request ID.
1419
+ # This is necessary because some requests may be finished earlier than
1420
+ # requests that were submitted before them.
1421
+ return sorted(outputs, key=lambda x: int(x.request_id))
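The `llm.py` diff above adds the offline `chat()` entry point plus the pooling helpers (`encode()`, `embed()`, `classify()`, `score()`) and the `sleep()`/`wake_up()` pair. The sketch below is illustrative only and not part of the diff: it assumes vLLM is installed, the model name is a placeholder, and the commented-out sleep calls assume the engine was built with sleep-mode support.

# Hedged usage sketch for the offline LLM.chat() API added in this diff.
# Assumptions: vLLM is installed; "facebook/opt-125m" is a stand-in model name.
from vllm import LLM, SamplingParams

llm = LLM(model="facebook/opt-125m")

# A single conversation is a list of {"role", "content"} messages;
# a list of such lists runs several conversations in one batch.
conversation = [
    {"role": "system", "content": "You are a concise assistant."},
    {"role": "user", "content": "Who are you?"},
]

outputs = llm.chat(
    conversation,
    sampling_params=SamplingParams(temperature=0.0, max_tokens=32),
    use_tqdm=False,
)
print(outputs[0].outputs[0].text)

# llm.sleep(level=1)  # offload weights between batches (assumes sleep-mode support)
# llm.wake_up()       # restore the engine before the next generate()/chat() call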
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/logger.py ADDED
@@ -0,0 +1,44 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ from typing import List, Optional, Union
4
+
5
+ from vllm.logger import init_logger
6
+ from vllm.lora.request import LoRARequest
7
+ from vllm.pooling_params import PoolingParams
8
+ from vllm.prompt_adapter.request import PromptAdapterRequest
9
+ from vllm.sampling_params import BeamSearchParams, SamplingParams
10
+
11
+ logger = init_logger(__name__)
12
+
13
+
14
+ class RequestLogger:
15
+
16
+ def __init__(self, *, max_log_len: Optional[int]) -> None:
17
+ super().__init__()
18
+
19
+ self.max_log_len = max_log_len
20
+
21
+ def log_inputs(
22
+ self,
23
+ request_id: str,
24
+ prompt: Optional[str],
25
+ prompt_token_ids: Optional[List[int]],
26
+ params: Optional[Union[SamplingParams, PoolingParams,
27
+ BeamSearchParams]],
28
+ lora_request: Optional[LoRARequest],
29
+ prompt_adapter_request: Optional[PromptAdapterRequest],
30
+ ) -> None:
31
+ max_log_len = self.max_log_len
32
+ if max_log_len is not None:
33
+ if prompt is not None:
34
+ prompt = prompt[:max_log_len]
35
+
36
+ if prompt_token_ids is not None:
37
+ prompt_token_ids = prompt_token_ids[:max_log_len]
38
+
39
+ logger.info(
40
+ "Received request %s: prompt: %r, "
41
+ "params: %s, prompt_token_ids: %s, "
42
+ "lora_request: %s, prompt_adapter_request: %s.", request_id,
43
+ prompt, params, prompt_token_ids, lora_request,
44
+ prompt_adapter_request)
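The `RequestLogger` added in `logger.py` only truncates the prompt and the prompt token ids to `max_log_len` before emitting a single `logger.info` line. A minimal calling sketch is shown below; the request id and prompt text are made-up values used purely for illustration.

# Hedged sketch: exercises the truncation behaviour of RequestLogger.log_inputs().
from vllm.entrypoints.logger import RequestLogger
from vllm.sampling_params import SamplingParams

request_logger = RequestLogger(max_log_len=32)  # log at most 32 chars / token ids
request_logger.log_inputs(
    request_id="cmpl-demo-0",                       # made-up request id
    prompt="An intentionally long prompt " * 8,     # logged truncated to 32 chars
    prompt_token_ids=None,
    params=SamplingParams(max_tokens=16),
    lora_request=None,
    prompt_adapter_request=None,
)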
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/__init__.py ADDED
File without changes
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/__pycache__/api_server.cpython-310.pyc ADDED
Binary file (25.3 kB).
 
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/__pycache__/serving_engine.cpython-310.pyc ADDED
Binary file (13.6 kB).
 
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/__pycache__/serving_pooling.cpython-310.pyc ADDED
Binary file (5.76 kB).
 
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/__pycache__/serving_rerank.cpython-310.pyc ADDED
Binary file (6.04 kB).
 
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/__pycache__/serving_tokenization.cpython-310.pyc ADDED
Binary file (3.52 kB).
 
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/api_server.py ADDED
@@ -0,0 +1,991 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ import asyncio
4
+ import atexit
5
+ import gc
6
+ import importlib
7
+ import inspect
8
+ import multiprocessing
9
+ import os
10
+ import re
11
+ import signal
12
+ import socket
13
+ import tempfile
14
+ import uuid
15
+ from argparse import Namespace
16
+ from contextlib import asynccontextmanager
17
+ from functools import partial
18
+ from http import HTTPStatus
19
+ from typing import Annotated, AsyncIterator, Dict, Optional, Set, Tuple, Union
20
+
21
+ import uvloop
22
+ from fastapi import APIRouter, Depends, FastAPI, Form, HTTPException, Request
23
+ from fastapi.exceptions import RequestValidationError
24
+ from fastapi.middleware.cors import CORSMiddleware
25
+ from fastapi.responses import JSONResponse, Response, StreamingResponse
26
+ from starlette.datastructures import State
27
+ from starlette.routing import Mount
28
+ from typing_extensions import assert_never
29
+
30
+ import vllm.envs as envs
31
+ from vllm.config import ModelConfig
32
+ from vllm.engine.arg_utils import AsyncEngineArgs
33
+ from vllm.engine.async_llm_engine import AsyncLLMEngine # type: ignore
34
+ from vllm.engine.multiprocessing.client import MQLLMEngineClient
35
+ from vllm.engine.multiprocessing.engine import run_mp_engine
36
+ from vllm.engine.protocol import EngineClient
37
+ from vllm.entrypoints.chat_utils import load_chat_template
38
+ from vllm.entrypoints.launcher import serve_http
39
+ from vllm.entrypoints.logger import RequestLogger
40
+ from vllm.entrypoints.openai.cli_args import (make_arg_parser,
41
+ validate_parsed_serve_args)
42
+ # yapf conflicts with isort for this block
43
+ # yapf: disable
44
+ from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,
45
+ ChatCompletionResponse,
46
+ CompletionRequest,
47
+ CompletionResponse,
48
+ DetokenizeRequest,
49
+ DetokenizeResponse,
50
+ EmbeddingChatRequest,
51
+ EmbeddingCompletionRequest,
52
+ EmbeddingRequest,
53
+ EmbeddingResponse,
54
+ EmbeddingResponseData,
55
+ ErrorResponse,
56
+ LoadLoraAdapterRequest,
57
+ PoolingChatRequest,
58
+ PoolingCompletionRequest,
59
+ PoolingRequest, PoolingResponse,
60
+ RerankRequest, RerankResponse,
61
+ ScoreRequest, ScoreResponse,
62
+ TokenizeRequest,
63
+ TokenizeResponse,
64
+ TranscriptionRequest,
65
+ TranscriptionResponse,
66
+ UnloadLoraAdapterRequest)
67
+ from vllm.entrypoints.openai.reasoning_parsers import ReasoningParserManager
68
+ # yapf: enable
69
+ from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
70
+ from vllm.entrypoints.openai.serving_completion import OpenAIServingCompletion
71
+ from vllm.entrypoints.openai.serving_embedding import OpenAIServingEmbedding
72
+ from vllm.entrypoints.openai.serving_engine import OpenAIServing
73
+ from vllm.entrypoints.openai.serving_models import (BaseModelPath,
74
+ OpenAIServingModels)
75
+ from vllm.entrypoints.openai.serving_pooling import OpenAIServingPooling
76
+ from vllm.entrypoints.openai.serving_rerank import JinaAIServingRerank
77
+ from vllm.entrypoints.openai.serving_score import OpenAIServingScores
78
+ from vllm.entrypoints.openai.serving_tokenization import (
79
+ OpenAIServingTokenization)
80
+ from vllm.entrypoints.openai.serving_transcription import (
81
+ OpenAIServingTranscription)
82
+ from vllm.entrypoints.openai.tool_parsers import ToolParserManager
83
+ from vllm.entrypoints.utils import with_cancellation
84
+ from vllm.logger import init_logger
85
+ from vllm.usage.usage_lib import UsageContext
86
+ from vllm.utils import (FlexibleArgumentParser, get_open_zmq_ipc_path,
87
+ is_valid_ipv6_address, set_ulimit)
88
+ from vllm.version import __version__ as VLLM_VERSION
89
+
90
+ TIMEOUT_KEEP_ALIVE = 5 # seconds
91
+
92
+ prometheus_multiproc_dir: tempfile.TemporaryDirectory
93
+
94
+ # Cannot use __name__ (https://github.com/vllm-project/vllm/pull/4765)
95
+ logger = init_logger('vllm.entrypoints.openai.api_server')
96
+
97
+ _running_tasks: Set[asyncio.Task] = set()
98
+
99
+
100
+ @asynccontextmanager
101
+ async def lifespan(app: FastAPI):
102
+ try:
103
+ if app.state.log_stats:
104
+ engine_client: EngineClient = app.state.engine_client
105
+
106
+ async def _force_log():
107
+ while True:
108
+ await asyncio.sleep(10.)
109
+ await engine_client.do_log_stats()
110
+
111
+ task = asyncio.create_task(_force_log())
112
+ _running_tasks.add(task)
113
+ task.add_done_callback(_running_tasks.remove)
114
+ else:
115
+ task = None
116
+
117
+ # Mark the startup heap as static so that it's ignored by GC.
118
+ # Reduces pause times of oldest generation collections.
119
+ gc.collect()
120
+ gc.freeze()
121
+ try:
122
+ yield
123
+ finally:
124
+ if task is not None:
125
+ task.cancel()
126
+ finally:
127
+ # Ensure app state including engine ref is gc'd
128
+ del app.state
129
+
130
+
131
+ @asynccontextmanager
132
+ async def build_async_engine_client(
133
+ args: Namespace) -> AsyncIterator[EngineClient]:
134
+
135
+ # Context manager to handle engine_client lifecycle
136
+ # Ensures everything is shutdown and cleaned up on error/exit
137
+ engine_args = AsyncEngineArgs.from_cli_args(args)
138
+
139
+ async with build_async_engine_client_from_engine_args(
140
+ engine_args, args.disable_frontend_multiprocessing) as engine:
141
+ yield engine
142
+
143
+
144
+ @asynccontextmanager
145
+ async def build_async_engine_client_from_engine_args(
146
+ engine_args: AsyncEngineArgs,
147
+ disable_frontend_multiprocessing: bool = False,
148
+ ) -> AsyncIterator[EngineClient]:
149
+ """
150
+ Create EngineClient, either:
151
+ - in-process using the AsyncLLMEngine directly
152
+ - multiprocess using AsyncLLMEngine RPC
153
+
154
+ Yields the engine client; an error is raised if creation fails.
155
+ """
156
+
157
+ # AsyncLLMEngine.
158
+ if (MQLLMEngineClient.is_unsupported_config(engine_args)
159
+ or envs.VLLM_USE_V1 or disable_frontend_multiprocessing):
160
+
161
+ engine_client: Optional[EngineClient] = None
162
+ try:
163
+ engine_client = AsyncLLMEngine.from_engine_args(
164
+ engine_args=engine_args,
165
+ usage_context=UsageContext.OPENAI_API_SERVER)
166
+ yield engine_client
167
+ finally:
168
+ if engine_client and hasattr(engine_client, "shutdown"):
169
+ engine_client.shutdown()
170
+
171
+ # MQLLMEngine.
172
+ else:
173
+ if "PROMETHEUS_MULTIPROC_DIR" not in os.environ:
174
+ # Make TemporaryDirectory for prometheus multiprocessing
175
+ # Note: global TemporaryDirectory will be automatically
176
+ # cleaned up upon exit.
177
+ global prometheus_multiproc_dir
178
+ prometheus_multiproc_dir = tempfile.TemporaryDirectory()
179
+ os.environ[
180
+ "PROMETHEUS_MULTIPROC_DIR"] = prometheus_multiproc_dir.name
181
+ else:
182
+ logger.warning(
183
+ "Found PROMETHEUS_MULTIPROC_DIR was set by user. "
184
+ "This directory must be wiped between vLLM runs or "
185
+ "you will find inaccurate metrics. Unset the variable "
186
+ "and vLLM will properly handle cleanup.")
187
+
188
+ # Select random path for IPC.
189
+ ipc_path = get_open_zmq_ipc_path()
190
+ logger.debug("Multiprocessing frontend to use %s for IPC Path.",
191
+ ipc_path)
192
+
193
+ # Start RPCServer in separate process (holds the LLMEngine).
194
+ # the current process might have CUDA context,
195
+ # so we need to spawn a new process
196
+ context = multiprocessing.get_context("spawn")
197
+
198
+ # The Process can raise an exception during startup, which may
199
+ # not actually result in an exitcode being reported. As a result
200
+ # we use a shared variable to communicate the information.
201
+ engine_alive = multiprocessing.Value('b', True, lock=False)
202
+ engine_process = context.Process(target=run_mp_engine,
203
+ args=(engine_args,
204
+ UsageContext.OPENAI_API_SERVER,
205
+ ipc_path, engine_alive))
206
+ engine_process.start()
207
+ engine_pid = engine_process.pid
208
+ assert engine_pid is not None, "Engine process failed to start."
209
+ logger.info("Started engine process with PID %d", engine_pid)
210
+
211
+ def _cleanup_ipc_path():
212
+ socket_path = ipc_path.replace("ipc://", "")
213
+ if os.path.exists(socket_path):
214
+ os.remove(socket_path)
215
+
216
+ # Ensure we clean up the local IPC socket file on exit.
217
+ atexit.register(_cleanup_ipc_path)
218
+
219
+ # Build RPCClient, which conforms to EngineClient Protocol.
220
+ engine_config = engine_args.create_engine_config()
221
+ build_client = partial(MQLLMEngineClient, ipc_path, engine_config,
222
+ engine_pid)
223
+ mq_engine_client = await asyncio.get_running_loop().run_in_executor(
224
+ None, build_client)
225
+ try:
226
+ while True:
227
+ try:
228
+ await mq_engine_client.setup()
229
+ break
230
+ except TimeoutError:
231
+ if (not engine_process.is_alive()
232
+ or not engine_alive.value):
233
+ raise RuntimeError(
234
+ "Engine process failed to start. See stack "
235
+ "trace for the root cause.") from None
236
+
237
+ yield mq_engine_client # type: ignore[misc]
238
+ finally:
239
+ # Ensure rpc server process was terminated
240
+ engine_process.terminate()
241
+
242
+ # Close all open connections to the backend
243
+ mq_engine_client.close()
244
+
245
+ # Wait for engine process to join
246
+ engine_process.join(4)
247
+ if engine_process.exitcode is None:
248
+ # Kill if the process has not stopped within the 4-second join above
249
+ engine_process.kill()
250
+
251
+ # Lazy import for prometheus multiprocessing.
252
+ # We need to set PROMETHEUS_MULTIPROC_DIR environment variable
253
+ # before prometheus_client is imported.
254
+ # See https://prometheus.github.io/client_python/multiprocess/
255
+ from prometheus_client import multiprocess
256
+ multiprocess.mark_process_dead(engine_process.pid)
257
+
258
+
259
+ async def validate_json_request(raw_request: Request):
260
+ content_type = raw_request.headers.get("content-type", "").lower()
261
+ media_type = content_type.split(";", maxsplit=1)[0]
262
+ if media_type != "application/json":
263
+ raise HTTPException(
264
+ status_code=HTTPStatus.UNSUPPORTED_MEDIA_TYPE,
265
+ detail="Unsupported Media Type: Only 'application/json' is allowed"
266
+ )
267
+
268
+
269
+ router = APIRouter()
270
+
271
+
272
+ def mount_metrics(app: FastAPI):
273
+ # Lazy import for prometheus multiprocessing.
274
+ # We need to set PROMETHEUS_MULTIPROC_DIR environment variable
275
+ # before prometheus_client is imported.
276
+ # See https://prometheus.github.io/client_python/multiprocess/
277
+ from prometheus_client import (CollectorRegistry, make_asgi_app,
278
+ multiprocess)
279
+
280
+ prometheus_multiproc_dir_path = os.getenv("PROMETHEUS_MULTIPROC_DIR", None)
281
+ if prometheus_multiproc_dir_path is not None:
282
+ logger.debug("vLLM to use %s as PROMETHEUS_MULTIPROC_DIR",
283
+ prometheus_multiproc_dir_path)
284
+ registry = CollectorRegistry()
285
+ multiprocess.MultiProcessCollector(registry)
286
+
287
+ # Add prometheus asgi middleware to route /metrics requests
288
+ metrics_route = Mount("/metrics", make_asgi_app(registry=registry))
289
+ else:
290
+ # Add prometheus asgi middleware to route /metrics requests
291
+ metrics_route = Mount("/metrics", make_asgi_app())
292
+
293
+ # Workaround for 307 Redirect for /metrics
294
+ metrics_route.path_regex = re.compile("^/metrics(?P<path>.*)$")
295
+ app.routes.append(metrics_route)
296
+
297
+
298
+ def base(request: Request) -> OpenAIServing:
299
+ # Reuse the existing instance
300
+ return tokenization(request)
301
+
302
+
303
+ def models(request: Request) -> OpenAIServingModels:
304
+ return request.app.state.openai_serving_models
305
+
306
+
307
+ def chat(request: Request) -> Optional[OpenAIServingChat]:
308
+ return request.app.state.openai_serving_chat
309
+
310
+
311
+ def completion(request: Request) -> Optional[OpenAIServingCompletion]:
312
+ return request.app.state.openai_serving_completion
313
+
314
+
315
+ def pooling(request: Request) -> Optional[OpenAIServingPooling]:
316
+ return request.app.state.openai_serving_pooling
317
+
318
+
319
+ def embedding(request: Request) -> Optional[OpenAIServingEmbedding]:
320
+ return request.app.state.openai_serving_embedding
321
+
322
+
323
+ def score(request: Request) -> Optional[OpenAIServingScores]:
324
+ return request.app.state.openai_serving_scores
325
+
326
+
327
+ def rerank(request: Request) -> Optional[JinaAIServingRerank]:
328
+ return request.app.state.jinaai_serving_reranking
329
+
330
+
331
+ def tokenization(request: Request) -> OpenAIServingTokenization:
332
+ return request.app.state.openai_serving_tokenization
333
+
334
+
335
+ def transcription(request: Request) -> OpenAIServingTranscription:
336
+ return request.app.state.openai_serving_transcription
337
+
338
+
339
+ def engine_client(request: Request) -> EngineClient:
340
+ return request.app.state.engine_client
341
+
342
+
343
+ @router.get("/health")
344
+ async def health(raw_request: Request) -> Response:
345
+ """Health check."""
346
+ await engine_client(raw_request).check_health()
347
+ return Response(status_code=200)
348
+
349
+
350
+ @router.api_route("/ping", methods=["GET", "POST"])
351
+ async def ping(raw_request: Request) -> Response:
352
+ """Ping check. Endpoint required for SageMaker"""
353
+ return await health(raw_request)
354
+
355
+
356
+ @router.post("/tokenize", dependencies=[Depends(validate_json_request)])
357
+ @with_cancellation
358
+ async def tokenize(request: TokenizeRequest, raw_request: Request):
359
+ handler = tokenization(raw_request)
360
+
361
+ generator = await handler.create_tokenize(request, raw_request)
362
+ if isinstance(generator, ErrorResponse):
363
+ return JSONResponse(content=generator.model_dump(),
364
+ status_code=generator.code)
365
+ elif isinstance(generator, TokenizeResponse):
366
+ return JSONResponse(content=generator.model_dump())
367
+
368
+ assert_never(generator)
369
+
370
+
371
+ @router.post("/detokenize", dependencies=[Depends(validate_json_request)])
372
+ @with_cancellation
373
+ async def detokenize(request: DetokenizeRequest, raw_request: Request):
374
+ handler = tokenization(raw_request)
375
+
376
+ generator = await handler.create_detokenize(request, raw_request)
377
+ if isinstance(generator, ErrorResponse):
378
+ return JSONResponse(content=generator.model_dump(),
379
+ status_code=generator.code)
380
+ elif isinstance(generator, DetokenizeResponse):
381
+ return JSONResponse(content=generator.model_dump())
382
+
383
+ assert_never(generator)
384
+
385
+
386
+ @router.get("/v1/models")
387
+ async def show_available_models(raw_request: Request):
388
+ handler = models(raw_request)
389
+
390
+ models_ = await handler.show_available_models()
391
+ return JSONResponse(content=models_.model_dump())
392
+
393
+
394
+ @router.get("/version")
395
+ async def show_version():
396
+ ver = {"version": VLLM_VERSION}
397
+ return JSONResponse(content=ver)
398
+
399
+
400
+ @router.post("/v1/chat/completions",
401
+ dependencies=[Depends(validate_json_request)])
402
+ @with_cancellation
403
+ async def create_chat_completion(request: ChatCompletionRequest,
404
+ raw_request: Request):
405
+ handler = chat(raw_request)
406
+ if handler is None:
407
+ return base(raw_request).create_error_response(
408
+ message="The model does not support Chat Completions API")
409
+
410
+ generator = await handler.create_chat_completion(request, raw_request)
411
+
412
+ if isinstance(generator, ErrorResponse):
413
+ return JSONResponse(content=generator.model_dump(),
414
+ status_code=generator.code)
415
+
416
+ elif isinstance(generator, ChatCompletionResponse):
417
+ return JSONResponse(content=generator.model_dump())
418
+
419
+ return StreamingResponse(content=generator, media_type="text/event-stream")
420
+
421
+
422
+ @router.post("/v1/completions", dependencies=[Depends(validate_json_request)])
423
+ @with_cancellation
424
+ async def create_completion(request: CompletionRequest, raw_request: Request):
425
+ handler = completion(raw_request)
426
+ if handler is None:
427
+ return base(raw_request).create_error_response(
428
+ message="The model does not support Completions API")
429
+
430
+ generator = await handler.create_completion(request, raw_request)
431
+ if isinstance(generator, ErrorResponse):
432
+ return JSONResponse(content=generator.model_dump(),
433
+ status_code=generator.code)
434
+ elif isinstance(generator, CompletionResponse):
435
+ return JSONResponse(content=generator.model_dump())
436
+
437
+ return StreamingResponse(content=generator, media_type="text/event-stream")
438
+
439
+
440
+ @router.post("/v1/embeddings", dependencies=[Depends(validate_json_request)])
441
+ @with_cancellation
442
+ async def create_embedding(request: EmbeddingRequest, raw_request: Request):
443
+ handler = embedding(raw_request)
444
+ if handler is None:
445
+ fallback_handler = pooling(raw_request)
446
+ if fallback_handler is None:
447
+ return base(raw_request).create_error_response(
448
+ message="The model does not support Embeddings API")
449
+
450
+ logger.warning(
451
+ "Embeddings API will become exclusive to embedding models "
452
+ "in a future release. To return the hidden states directly, "
453
+ "use the Pooling API (`/pooling`) instead.")
454
+
455
+ res = await fallback_handler.create_pooling(request, raw_request)
456
+
457
+ generator: Union[ErrorResponse, EmbeddingResponse]
458
+ if isinstance(res, PoolingResponse):
459
+ generator = EmbeddingResponse(
460
+ id=res.id,
461
+ object=res.object,
462
+ created=res.created,
463
+ model=res.model,
464
+ data=[
465
+ EmbeddingResponseData(
466
+ index=d.index,
467
+ embedding=d.data, # type: ignore
468
+ ) for d in res.data
469
+ ],
470
+ usage=res.usage,
471
+ )
472
+ else:
473
+ generator = res
474
+ else:
475
+ generator = await handler.create_embedding(request, raw_request)
476
+
477
+ if isinstance(generator, ErrorResponse):
478
+ return JSONResponse(content=generator.model_dump(),
479
+ status_code=generator.code)
480
+ elif isinstance(generator, EmbeddingResponse):
481
+ return JSONResponse(content=generator.model_dump())
482
+
483
+ assert_never(generator)
484
+
485
+
486
+ @router.post("/pooling", dependencies=[Depends(validate_json_request)])
487
+ @with_cancellation
488
+ async def create_pooling(request: PoolingRequest, raw_request: Request):
489
+ handler = pooling(raw_request)
490
+ if handler is None:
491
+ return base(raw_request).create_error_response(
492
+ message="The model does not support Pooling API")
493
+
494
+ generator = await handler.create_pooling(request, raw_request)
495
+ if isinstance(generator, ErrorResponse):
496
+ return JSONResponse(content=generator.model_dump(),
497
+ status_code=generator.code)
498
+ elif isinstance(generator, PoolingResponse):
499
+ return JSONResponse(content=generator.model_dump())
500
+
501
+ assert_never(generator)
502
+
503
+
504
+ @router.post("/score", dependencies=[Depends(validate_json_request)])
505
+ @with_cancellation
506
+ async def create_score(request: ScoreRequest, raw_request: Request):
507
+ handler = score(raw_request)
508
+ if handler is None:
509
+ return base(raw_request).create_error_response(
510
+ message="The model does not support Score API")
511
+
512
+ generator = await handler.create_score(request, raw_request)
513
+ if isinstance(generator, ErrorResponse):
514
+ return JSONResponse(content=generator.model_dump(),
515
+ status_code=generator.code)
516
+ elif isinstance(generator, ScoreResponse):
517
+ return JSONResponse(content=generator.model_dump())
518
+
519
+ assert_never(generator)
520
+
521
+
522
+ @router.post("/v1/score", dependencies=[Depends(validate_json_request)])
523
+ @with_cancellation
524
+ async def create_score_v1(request: ScoreRequest, raw_request: Request):
525
+ logger.warning(
526
+ "To indicate that Score API is not part of standard OpenAI API, we "
527
+ "have moved it to `/score`. Please update your client accordingly.")
528
+
529
+ return await create_score(request, raw_request)
530
+
531
+
532
+ @router.post("/v1/audio/transcriptions")
533
+ @with_cancellation
534
+ async def create_transcriptions(request: Annotated[TranscriptionRequest,
535
+ Form()],
536
+ raw_request: Request):
537
+
538
+ handler = transcription(raw_request)
539
+ if handler is None:
540
+ return base(raw_request).create_error_response(
541
+ message="The model does not support Transcriptions API")
542
+
543
+ audio_data = await request.file.read()
544
+ generator = await handler.create_transcription(audio_data, request,
545
+ raw_request)
546
+
547
+ if isinstance(generator, ErrorResponse):
548
+ return JSONResponse(content=generator.model_dump(),
549
+ status_code=generator.code)
550
+
551
+ elif isinstance(generator, TranscriptionResponse):
552
+ return JSONResponse(content=generator.model_dump())
553
+
554
+ return StreamingResponse(content=generator, media_type="text/event-stream")
555
+
556
+
557
+ @router.post("/rerank", dependencies=[Depends(validate_json_request)])
558
+ @with_cancellation
559
+ async def do_rerank(request: RerankRequest, raw_request: Request):
560
+ handler = rerank(raw_request)
561
+ if handler is None:
562
+ return base(raw_request).create_error_response(
563
+ message="The model does not support Rerank (Score) API")
564
+ generator = await handler.do_rerank(request, raw_request)
565
+ if isinstance(generator, ErrorResponse):
566
+ return JSONResponse(content=generator.model_dump(),
567
+ status_code=generator.code)
568
+ elif isinstance(generator, RerankResponse):
569
+ return JSONResponse(content=generator.model_dump())
570
+
571
+ assert_never(generator)
572
+
573
+
574
+ @router.post("/v1/rerank", dependencies=[Depends(validate_json_request)])
575
+ @with_cancellation
576
+ async def do_rerank_v1(request: RerankRequest, raw_request: Request):
577
+ logger.warning_once(
578
+ "To indicate that the rerank API is not part of the standard OpenAI"
579
+ " API, we have located it at `/rerank`. Please update your client"
580
+ "accordingly. (Note: Conforms to JinaAI rerank API)")
581
+
582
+ return await do_rerank(request, raw_request)
583
+
584
+
585
+ @router.post("/v2/rerank", dependencies=[Depends(validate_json_request)])
586
+ @with_cancellation
587
+ async def do_rerank_v2(request: RerankRequest, raw_request: Request):
588
+ return await do_rerank(request, raw_request)
589
+
590
+
591
+ TASK_HANDLERS: Dict[str, Dict[str, tuple]] = {
592
+ "generate": {
593
+ "messages": (ChatCompletionRequest, create_chat_completion),
594
+ "default": (CompletionRequest, create_completion),
595
+ },
596
+ "embed": {
597
+ "messages": (EmbeddingChatRequest, create_embedding),
598
+ "default": (EmbeddingCompletionRequest, create_embedding),
599
+ },
600
+ "score": {
601
+ "default": (RerankRequest, do_rerank)
602
+ },
603
+ "rerank": {
604
+ "default": (RerankRequest, do_rerank)
605
+ },
606
+ "reward": {
607
+ "messages": (PoolingChatRequest, create_pooling),
608
+ "default": (PoolingCompletionRequest, create_pooling),
609
+ },
610
+ "classify": {
611
+ "messages": (PoolingChatRequest, create_pooling),
612
+ "default": (PoolingCompletionRequest, create_pooling),
613
+ },
614
+ }
615
+
616
+ if envs.VLLM_SERVER_DEV_MODE:
617
+
618
+ @router.post("/reset_prefix_cache")
619
+ async def reset_prefix_cache(raw_request: Request):
620
+ """
621
+ Reset the prefix cache. Note that we currently do not check if the
622
+ prefix cache is successfully reset in the API server.
623
+ """
624
+ logger.info("Resetting prefix cache...")
625
+ await engine_client(raw_request).reset_prefix_cache()
626
+ return Response(status_code=200)
627
+
628
+ @router.post("/sleep")
629
+ async def sleep(raw_request: Request):
630
+ # get POST params
631
+ level = raw_request.query_params.get("level", "1")
632
+ logger.info("sleep the engine with level %s", level)
633
+ await engine_client(raw_request).sleep(int(level))
634
+ # FIXME: in v0 with frontend multiprocessing, the sleep command
635
+ # is sent but does not finish yet when we return a response.
636
+ return Response(status_code=200)
637
+
638
+ @router.post("/wake_up")
639
+ async def wake_up(raw_request: Request):
640
+ logger.info("wake up the engine")
641
+ await engine_client(raw_request).wake_up()
642
+ # FIXME: in v0 with frontend multiprocessing, the wake-up command
643
+ # is sent but does not finish yet when we return a response.
644
+ return Response(status_code=200)
645
+
646
+
647
+ @router.post("/invocations", dependencies=[Depends(validate_json_request)])
648
+ async def invocations(raw_request: Request):
649
+ """
650
+ For SageMaker, routes requests to other handlers based on model `task`.
651
+ """
652
+ body = await raw_request.json()
653
+ task = raw_request.app.state.task
654
+
655
+ if task not in TASK_HANDLERS:
656
+ raise HTTPException(
657
+ status_code=400,
658
+ detail=f"Unsupported task: '{task}' for '/invocations'. "
659
+ f"Expected one of {set(TASK_HANDLERS.keys())}")
660
+
661
+ handler_config = TASK_HANDLERS[task]
662
+ if "messages" in body:
663
+ request_model, handler = handler_config["messages"]
664
+ else:
665
+ request_model, handler = handler_config["default"]
666
+
667
+ # this is required since we lose the FastAPI automatic casting
668
+ request = request_model.model_validate(body)
669
+ return await handler(request, raw_request)
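# --- Illustrative sketch (not part of the diff above): how a client might call
# --- /invocations. Host, port, model name and payloads are assumptions; the
# --- routing mirrors the handler logic: a body containing "messages" goes to
# --- the task's "messages" handler, anything else to its "default" handler.
import requests  # assumed to be available on the client side

BASE_URL = "http://localhost:8000"  # hypothetical server address

# For task "generate", this body is dispatched to create_chat_completion.
chat_body = {"model": "my-model",
             "messages": [{"role": "user", "content": "Hello"}]}
print(requests.post(f"{BASE_URL}/invocations", json=chat_body).json())

# Without "messages", the same task falls back to create_completion.
completion_body = {"model": "my-model", "prompt": "Hello"}
print(requests.post(f"{BASE_URL}/invocations", json=completion_body).json())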
670
+
671
+
672
+ if envs.VLLM_TORCH_PROFILER_DIR:
673
+ logger.warning(
674
+ "Torch Profiler is enabled in the API server. This should ONLY be "
675
+ "used for local development!")
676
+
677
+ @router.post("/start_profile")
678
+ async def start_profile(raw_request: Request):
679
+ logger.info("Starting profiler...")
680
+ await engine_client(raw_request).start_profile()
681
+ logger.info("Profiler started.")
682
+ return Response(status_code=200)
683
+
684
+ @router.post("/stop_profile")
685
+ async def stop_profile(raw_request: Request):
686
+ logger.info("Stopping profiler...")
687
+ await engine_client(raw_request).stop_profile()
688
+ logger.info("Profiler stopped.")
689
+ return Response(status_code=200)
690
+
691
+
692
+ if envs.VLLM_ALLOW_RUNTIME_LORA_UPDATING:
693
+ logger.warning(
694
+ "Lora dynamic loading & unloading is enabled in the API server. "
695
+ "This should ONLY be used for local development!")
696
+
697
+ @router.post("/v1/load_lora_adapter",
698
+ dependencies=[Depends(validate_json_request)])
699
+ async def load_lora_adapter(request: LoadLoraAdapterRequest,
700
+ raw_request: Request):
701
+ handler = models(raw_request)
702
+ response = await handler.load_lora_adapter(request)
703
+ if isinstance(response, ErrorResponse):
704
+ return JSONResponse(content=response.model_dump(),
705
+ status_code=response.code)
706
+
707
+ return Response(status_code=200, content=response)
708
+
709
+ @router.post("/v1/unload_lora_adapter",
710
+ dependencies=[Depends(validate_json_request)])
711
+ async def unload_lora_adapter(request: UnloadLoraAdapterRequest,
712
+ raw_request: Request):
713
+ handler = models(raw_request)
714
+ response = await handler.unload_lora_adapter(request)
715
+ if isinstance(response, ErrorResponse):
716
+ return JSONResponse(content=response.model_dump(),
717
+ status_code=response.code)
718
+
719
+ return Response(status_code=200, content=response)
720
+
721
+
722
+ def build_app(args: Namespace) -> FastAPI:
723
+ if args.disable_fastapi_docs:
724
+ app = FastAPI(openapi_url=None,
725
+ docs_url=None,
726
+ redoc_url=None,
727
+ lifespan=lifespan)
728
+ else:
729
+ app = FastAPI(lifespan=lifespan)
730
+ app.include_router(router)
731
+ app.root_path = args.root_path
732
+
733
+ mount_metrics(app)
734
+
735
+ app.add_middleware(
736
+ CORSMiddleware,
737
+ allow_origins=args.allowed_origins,
738
+ allow_credentials=args.allow_credentials,
739
+ allow_methods=args.allowed_methods,
740
+ allow_headers=args.allowed_headers,
741
+ )
742
+
743
+ @app.exception_handler(RequestValidationError)
744
+ async def validation_exception_handler(_, exc):
745
+ err = ErrorResponse(message=str(exc),
746
+ type="BadRequestError",
747
+ code=HTTPStatus.BAD_REQUEST)
748
+ return JSONResponse(err.model_dump(),
749
+ status_code=HTTPStatus.BAD_REQUEST)
750
+
751
+ if token := envs.VLLM_API_KEY or args.api_key:
752
+
753
+ @app.middleware("http")
754
+ async def authentication(request: Request, call_next):
755
+ if request.method == "OPTIONS":
756
+ return await call_next(request)
757
+ url_path = request.url.path
758
+ if app.root_path and url_path.startswith(app.root_path):
759
+ url_path = url_path[len(app.root_path):]
760
+ if not url_path.startswith("/v1"):
761
+ return await call_next(request)
762
+ if request.headers.get("Authorization") != "Bearer " + token:
763
+ return JSONResponse(content={"error": "Unauthorized"},
764
+ status_code=401)
765
+ return await call_next(request)
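# --- Illustrative sketch (not part of the diff): client behaviour implied by the
# --- authentication middleware above. When VLLM_API_KEY or --api-key is set,
# --- every /v1/* request needs a matching Bearer token; OPTIONS requests and
# --- non-/v1 paths (e.g. /health) pass through unchecked. URL, key and model
# --- name below are assumptions.
import requests

resp = requests.post(
    "http://localhost:8000/v1/completions",
    headers={"Authorization": "Bearer my-secret-key"},  # hypothetical key
    json={"model": "my-model", "prompt": "Hello"},
)
# A missing or mismatched key yields {"error": "Unauthorized"} with status 401.
print(resp.status_code)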
766
+
767
+ if args.enable_request_id_headers:
768
+ logger.warning(
769
+ "CAUTION: Enabling X-Request-Id headers in the API Server. "
770
+ "This can harm performance at high QPS.")
771
+
772
+ @app.middleware("http")
773
+ async def add_request_id(request: Request, call_next):
774
+ request_id = request.headers.get(
775
+ "X-Request-Id") or uuid.uuid4().hex
776
+ response = await call_next(request)
777
+ response.headers["X-Request-Id"] = request_id
778
+ return response
779
+
780
+ for middleware in args.middleware:
781
+ module_path, object_name = middleware.rsplit(".", 1)
782
+ imported = getattr(importlib.import_module(module_path), object_name)
783
+ if inspect.isclass(imported):
784
+ app.add_middleware(imported) # type: ignore[arg-type]
785
+ elif inspect.iscoroutinefunction(imported):
786
+ app.middleware("http")(imported)
787
+ else:
788
+ raise ValueError(f"Invalid middleware {middleware}. "
789
+ f"Must be a function or a class.")
790
+
791
+ return app
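# --- Illustrative sketch (not part of the diff): a user-supplied middleware that
# --- the loop above can load via ``--middleware my_middlewares.add_server_header``.
# --- The module and header names are assumptions. A coroutine function is
# --- registered with ``app.middleware("http")``, a class with
# --- ``app.add_middleware()``.

# my_middlewares.py (hypothetical module importable from PYTHONPATH)
from fastapi import Request


async def add_server_header(request: Request, call_next):
    # Tag every response with a custom header before returning it.
    response = await call_next(request)
    response.headers["X-Served-By"] = "vllm-openai"
    return response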
792
+
793
+
794
+ async def init_app_state(
795
+ engine_client: EngineClient,
796
+ model_config: ModelConfig,
797
+ state: State,
798
+ args: Namespace,
799
+ ) -> None:
800
+ if args.served_model_name is not None:
801
+ served_model_names = args.served_model_name
802
+ else:
803
+ served_model_names = [args.model]
804
+
805
+ if args.disable_log_requests:
806
+ request_logger = None
807
+ else:
808
+ request_logger = RequestLogger(max_log_len=args.max_log_len)
809
+
810
+ base_model_paths = [
811
+ BaseModelPath(name=name, model_path=args.model)
812
+ for name in served_model_names
813
+ ]
814
+
815
+ state.engine_client = engine_client
816
+ state.log_stats = not args.disable_log_stats
817
+
818
+ resolved_chat_template = load_chat_template(args.chat_template)
819
+ if resolved_chat_template is not None:
820
+ logger.info("Using supplied chat template:\n%s",
821
+ resolved_chat_template)
822
+
823
+ state.openai_serving_models = OpenAIServingModels(
824
+ engine_client=engine_client,
825
+ model_config=model_config,
826
+ base_model_paths=base_model_paths,
827
+ lora_modules=args.lora_modules,
828
+ prompt_adapters=args.prompt_adapters,
829
+ )
830
+ await state.openai_serving_models.init_static_loras()
831
+ state.openai_serving_chat = OpenAIServingChat(
832
+ engine_client,
833
+ model_config,
834
+ state.openai_serving_models,
835
+ args.response_role,
836
+ request_logger=request_logger,
837
+ chat_template=resolved_chat_template,
838
+ chat_template_content_format=args.chat_template_content_format,
839
+ return_tokens_as_token_ids=args.return_tokens_as_token_ids,
840
+ enable_auto_tools=args.enable_auto_tool_choice,
841
+ tool_parser=args.tool_call_parser,
842
+ enable_reasoning=args.enable_reasoning,
843
+ reasoning_parser=args.reasoning_parser,
844
+ enable_prompt_tokens_details=args.enable_prompt_tokens_details,
845
+ ) if model_config.runner_type == "generate" else None
846
+ state.openai_serving_completion = OpenAIServingCompletion(
847
+ engine_client,
848
+ model_config,
849
+ state.openai_serving_models,
850
+ request_logger=request_logger,
851
+ return_tokens_as_token_ids=args.return_tokens_as_token_ids,
852
+ ) if model_config.runner_type == "generate" else None
853
+ state.openai_serving_pooling = OpenAIServingPooling(
854
+ engine_client,
855
+ model_config,
856
+ state.openai_serving_models,
857
+ request_logger=request_logger,
858
+ chat_template=resolved_chat_template,
859
+ chat_template_content_format=args.chat_template_content_format,
860
+ ) if model_config.runner_type == "pooling" else None
861
+ state.openai_serving_embedding = OpenAIServingEmbedding(
862
+ engine_client,
863
+ model_config,
864
+ state.openai_serving_models,
865
+ request_logger=request_logger,
866
+ chat_template=resolved_chat_template,
867
+ chat_template_content_format=args.chat_template_content_format,
868
+ ) if model_config.task == "embed" else None
869
+ state.openai_serving_scores = OpenAIServingScores(
870
+ engine_client,
871
+ model_config,
872
+ state.openai_serving_models,
873
+ request_logger=request_logger
874
+ ) if model_config.task == "score" else None
875
+ state.jinaai_serving_reranking = JinaAIServingRerank(
876
+ engine_client,
877
+ model_config,
878
+ state.openai_serving_models,
879
+ request_logger=request_logger
880
+ ) if model_config.task == "score" else None
881
+ state.openai_serving_tokenization = OpenAIServingTokenization(
882
+ engine_client,
883
+ model_config,
884
+ state.openai_serving_models,
885
+ request_logger=request_logger,
886
+ chat_template=resolved_chat_template,
887
+ chat_template_content_format=args.chat_template_content_format,
888
+ )
889
+ state.openai_serving_transcription = OpenAIServingTranscription(
890
+ engine_client,
891
+ model_config,
892
+ state.openai_serving_models,
893
+ request_logger=request_logger,
894
+ ) if model_config.runner_type == "transcription" else None
895
+ state.task = model_config.task
896
+
897
+
898
+ def create_server_socket(addr: Tuple[str, int]) -> socket.socket:
899
+ family = socket.AF_INET
900
+ if is_valid_ipv6_address(addr[0]):
901
+ family = socket.AF_INET6
902
+
903
+ sock = socket.socket(family=family, type=socket.SOCK_STREAM)
904
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
905
+ sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
906
+ sock.bind(addr)
907
+
908
+ return sock
909
+
910
+
911
+ async def run_server(args, **uvicorn_kwargs) -> None:
912
+ logger.info("vLLM API server version %s", VLLM_VERSION)
913
+ logger.info("args: %s", args)
914
+
915
+ if args.tool_parser_plugin and len(args.tool_parser_plugin) > 3:
916
+ ToolParserManager.import_tool_parser(args.tool_parser_plugin)
917
+
918
+ valid_tool_parses = ToolParserManager.tool_parsers.keys()
919
+ if args.enable_auto_tool_choice \
920
+ and args.tool_call_parser not in valid_tool_parses:
921
+ raise KeyError(f"invalid tool call parser: {args.tool_call_parser} "
922
+ f"(chose from {{ {','.join(valid_tool_parses)} }})")
923
+
924
+ valid_reasoning_parses = ReasoningParserManager.reasoning_parsers.keys()
925
+ if args.enable_reasoning \
926
+ and args.reasoning_parser not in valid_reasoning_parses:
927
+ raise KeyError(
928
+ f"invalid reasoning parser: {args.reasoning_parser} "
929
+ f"(chose from {{ {','.join(valid_reasoning_parses)} }})")
930
+
931
+ # workaround to make sure that we bind the port before the engine is set up.
932
+ # This avoids race conditions with ray.
933
+ # see https://github.com/vllm-project/vllm/issues/8204
934
+ sock_addr = (args.host or "", args.port)
935
+ sock = create_server_socket(sock_addr)
936
+
937
+ # workaround to avoid footguns where uvicorn drops requests with too
938
+ # many concurrent requests active
939
+ set_ulimit()
940
+
941
+ def signal_handler(*_) -> None:
942
+ # Interrupt server on sigterm while initializing
943
+ raise KeyboardInterrupt("terminated")
944
+
945
+ signal.signal(signal.SIGTERM, signal_handler)
946
+
947
+ async with build_async_engine_client(args) as engine_client:
948
+ app = build_app(args)
949
+
950
+ model_config = await engine_client.get_model_config()
951
+ await init_app_state(engine_client, model_config, app.state, args)
952
+
953
+ def _listen_addr(a: str) -> str:
954
+ if is_valid_ipv6_address(a):
955
+ return '[' + a + ']'
956
+ return a or "0.0.0.0"
957
+
958
+ logger.info("Starting vLLM API server on http://%s:%d",
959
+ _listen_addr(sock_addr[0]), sock_addr[1])
960
+
961
+ shutdown_task = await serve_http(
962
+ app,
963
+ sock=sock,
964
+ host=args.host,
965
+ port=args.port,
966
+ log_level=args.uvicorn_log_level,
967
+ timeout_keep_alive=TIMEOUT_KEEP_ALIVE,
968
+ ssl_keyfile=args.ssl_keyfile,
969
+ ssl_certfile=args.ssl_certfile,
970
+ ssl_ca_certs=args.ssl_ca_certs,
971
+ ssl_cert_reqs=args.ssl_cert_reqs,
972
+ **uvicorn_kwargs,
973
+ )
974
+
975
+ # NB: Await server shutdown only after the backend context is exited
976
+ await shutdown_task
977
+
978
+ sock.close()
979
+
980
+
981
+ if __name__ == "__main__":
982
+ # NOTE(simon):
983
+ # This section should be in sync with vllm/entrypoints/cli/main.py for CLI
984
+ # entrypoints.
985
+ parser = FlexibleArgumentParser(
986
+ description="vLLM OpenAI-Compatible RESTful API server.")
987
+ parser = make_arg_parser(parser)
988
+ args = parser.parse_args()
989
+ validate_parsed_serve_args(args)
990
+
991
+ uvloop.run(run_server(args))
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/cli_args.py ADDED
@@ -0,0 +1,305 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+ """
3
+ This file contains the command line arguments for the vLLM's
4
+ OpenAI-compatible server. It is kept in a separate file for documentation
5
+ purposes.
6
+ """
7
+
8
+ import argparse
9
+ import json
10
+ import ssl
11
+ from typing import List, Optional, Sequence, Union, get_args
12
+
13
+ from vllm.engine.arg_utils import AsyncEngineArgs, nullable_str
14
+ from vllm.entrypoints.chat_utils import (ChatTemplateContentFormatOption,
15
+ validate_chat_template)
16
+ from vllm.entrypoints.openai.reasoning_parsers import ReasoningParserManager
17
+ from vllm.entrypoints.openai.serving_models import (LoRAModulePath,
18
+ PromptAdapterPath)
19
+ from vllm.entrypoints.openai.tool_parsers import ToolParserManager
20
+ from vllm.utils import FlexibleArgumentParser
21
+
22
+
23
+ class LoRAParserAction(argparse.Action):
24
+
25
+ def __call__(
26
+ self,
27
+ parser: argparse.ArgumentParser,
28
+ namespace: argparse.Namespace,
29
+ values: Optional[Union[str, Sequence[str]]],
30
+ option_string: Optional[str] = None,
31
+ ):
32
+ if values is None:
33
+ values = []
34
+ if isinstance(values, str):
35
+ raise TypeError("Expected values to be a list")
36
+
37
+ lora_list: List[LoRAModulePath] = []
38
+ for item in values:
39
+ if item in [None, '']: # Skip if item is None or empty string
40
+ continue
41
+ if '=' in item and ',' not in item: # Old format: name=path
42
+ name, path = item.split('=')
43
+ lora_list.append(LoRAModulePath(name, path))
44
+ else: # Assume JSON format
45
+ try:
46
+ lora_dict = json.loads(item)
47
+ lora = LoRAModulePath(**lora_dict)
48
+ lora_list.append(lora)
49
+ except json.JSONDecodeError:
50
+ parser.error(
51
+ f"Invalid JSON format for --lora-modules: {item}")
52
+ except TypeError as e:
53
+ parser.error(
54
+ f"Invalid fields for --lora-modules: {item} - {str(e)}"
55
+ )
56
+ setattr(namespace, self.dest, lora_list)
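# --- Illustrative sketch (not part of the diff): how LoRAParserAction turns the
# --- two accepted ``--lora-modules`` forms into LoRAModulePath entries. Adapter
# --- names and paths are assumptions; the import paths match the file above.
import argparse

from vllm.entrypoints.openai.cli_args import LoRAParserAction
from vllm.entrypoints.openai.serving_models import LoRAModulePath

parser = argparse.ArgumentParser()
parser.add_argument("--lora-modules", nargs="+", action=LoRAParserAction)

# The old 'name=path' form and the new JSON form can be mixed in one invocation.
args = parser.parse_args([
    "--lora-modules",
    "my-lora=/path/to/adapter",
    '{"name": "my-lora-2", "path": "/path/to/other", "base_model_name": "base-id"}',
])
assert all(isinstance(m, LoRAModulePath) for m in args.lora_modules)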
57
+
58
+
59
+ class PromptAdapterParserAction(argparse.Action):
60
+
61
+ def __call__(
62
+ self,
63
+ parser: argparse.ArgumentParser,
64
+ namespace: argparse.Namespace,
65
+ values: Optional[Union[str, Sequence[str]]],
66
+ option_string: Optional[str] = None,
67
+ ):
68
+ if values is None:
69
+ values = []
70
+ if isinstance(values, str):
71
+ raise TypeError("Expected values to be a list")
72
+
73
+ adapter_list: List[PromptAdapterPath] = []
74
+ for item in values:
75
+ name, path = item.split('=')
76
+ adapter_list.append(PromptAdapterPath(name, path))
77
+ setattr(namespace, self.dest, adapter_list)
78
+
79
+
80
+ def make_arg_parser(parser: FlexibleArgumentParser) -> FlexibleArgumentParser:
81
+ parser.add_argument("--host",
82
+ type=nullable_str,
83
+ default=None,
84
+ help="Host name.")
85
+ parser.add_argument("--port", type=int, default=8000, help="Port number.")
86
+ parser.add_argument(
87
+ "--uvicorn-log-level",
88
+ type=str,
89
+ default="info",
90
+ choices=['debug', 'info', 'warning', 'error', 'critical', 'trace'],
91
+ help="Log level for uvicorn.")
92
+ parser.add_argument("--allow-credentials",
93
+ action="store_true",
94
+ help="Allow credentials.")
95
+ parser.add_argument("--allowed-origins",
96
+ type=json.loads,
97
+ default=["*"],
98
+ help="Allowed origins.")
99
+ parser.add_argument("--allowed-methods",
100
+ type=json.loads,
101
+ default=["*"],
102
+ help="Allowed methods.")
103
+ parser.add_argument("--allowed-headers",
104
+ type=json.loads,
105
+ default=["*"],
106
+ help="Allowed headers.")
107
+ parser.add_argument("--api-key",
108
+ type=nullable_str,
109
+ default=None,
110
+ help="If provided, the server will require this key "
111
+ "to be presented in the header.")
112
+ parser.add_argument(
113
+ "--lora-modules",
114
+ type=nullable_str,
115
+ default=None,
116
+ nargs='+',
117
+ action=LoRAParserAction,
118
+ help="LoRA module configurations in either 'name=path' format"
119
+ "or JSON format. "
120
+ "Example (old format): ``'name=path'`` "
121
+ "Example (new format): "
122
+ "``{\"name\": \"name\", \"path\": \"lora_path\", "
123
+ "\"base_model_name\": \"id\"}``")
124
+ parser.add_argument(
125
+ "--prompt-adapters",
126
+ type=nullable_str,
127
+ default=None,
128
+ nargs='+',
129
+ action=PromptAdapterParserAction,
130
+ help="Prompt adapter configurations in the format name=path. "
131
+ "Multiple adapters can be specified.")
132
+ parser.add_argument("--chat-template",
133
+ type=nullable_str,
134
+ default=None,
135
+ help="The file path to the chat template, "
136
+ "or the template in single-line form "
137
+ "for the specified model.")
138
+ parser.add_argument(
139
+ '--chat-template-content-format',
140
+ type=str,
141
+ default="auto",
142
+ choices=get_args(ChatTemplateContentFormatOption),
143
+ help='The format to render message content within a chat template.'
144
+ '\n\n'
145
+ '* "string" will render the content as a string. '
146
+ 'Example: ``"Hello World"``\n'
147
+ '* "openai" will render the content as a list of dictionaries, '
148
+ 'similar to OpenAI schema. '
149
+ 'Example: ``[{"type": "text", "text": "Hello world!"}]``')
150
+ parser.add_argument("--response-role",
151
+ type=nullable_str,
152
+ default="assistant",
153
+ help="The role name to return if "
154
+ "``request.add_generation_prompt=true``.")
155
+ parser.add_argument("--ssl-keyfile",
156
+ type=nullable_str,
157
+ default=None,
158
+ help="The file path to the SSL key file.")
159
+ parser.add_argument("--ssl-certfile",
160
+ type=nullable_str,
161
+ default=None,
162
+ help="The file path to the SSL cert file.")
163
+ parser.add_argument("--ssl-ca-certs",
164
+ type=nullable_str,
165
+ default=None,
166
+ help="The CA certificates file.")
167
+ parser.add_argument(
168
+ "--ssl-cert-reqs",
169
+ type=int,
170
+ default=int(ssl.CERT_NONE),
171
+ help="Whether client certificate is required (see stdlib ssl module's)."
172
+ )
173
+ parser.add_argument(
174
+ "--root-path",
175
+ type=nullable_str,
176
+ default=None,
177
+ help="FastAPI root_path when app is behind a path based routing proxy."
178
+ )
179
+ parser.add_argument(
180
+ "--middleware",
181
+ type=nullable_str,
182
+ action="append",
183
+ default=[],
184
+ help="Additional ASGI middleware to apply to the app. "
185
+ "We accept multiple --middleware arguments. "
186
+ "The value should be an import path. "
187
+ "If a function is provided, vLLM will add it to the server "
188
+ "using ``@app.middleware('http')``. "
189
+ "If a class is provided, vLLM will add it to the server "
190
+ "using ``app.add_middleware()``. ")
191
+ parser.add_argument(
192
+ "--return-tokens-as-token-ids",
193
+ action="store_true",
194
+ help="When ``--max-logprobs`` is specified, represents single tokens "
195
+ " as strings of the form 'token_id:{token_id}' so that tokens "
196
+ "that are not JSON-encodable can be identified.")
197
+ parser.add_argument(
198
+ "--disable-frontend-multiprocessing",
199
+ action="store_true",
200
+ help="If specified, will run the OpenAI frontend server in the same "
201
+ "process as the model serving engine.")
202
+ parser.add_argument(
203
+ "--enable-request-id-headers",
204
+ action="store_true",
205
+ help="If specified, API server will add X-Request-Id header to "
206
+ "responses. Caution: this hurts performance at high QPS.")
207
+ parser.add_argument(
208
+ "--enable-auto-tool-choice",
209
+ action="store_true",
210
+ default=False,
211
+ help="Enable auto tool choice for supported models. Use "
212
+ "``--tool-call-parser`` to specify which parser to use.")
213
+ parser.add_argument(
214
+ "--enable-reasoning",
215
+ action="store_true",
216
+ default=False,
217
+ help="Whether to enable reasoning_content for the model. "
218
+ "If enabled, the model will be able to generate reasoning content.")
219
+
220
+ valid_reasoning_parsers = ReasoningParserManager.reasoning_parsers.keys()
221
+ parser.add_argument(
222
+ "--reasoning-parser",
223
+ type=str,
224
+ metavar="{" + ",".join(valid_reasoning_parsers) + "}",
225
+ default=None,
226
+ help=
227
+ "Select the reasoning parser depending on the model that you're using."
228
+ " This is used to parse the reasoning content into OpenAI API "
229
+ "format. Required for ``--enable-reasoning``.")
230
+
231
+ valid_tool_parsers = ToolParserManager.tool_parsers.keys()
232
+ parser.add_argument(
233
+ "--tool-call-parser",
234
+ type=str,
235
+ metavar="{" + ",".join(valid_tool_parsers) + "} or name registered in "
236
+ "--tool-parser-plugin",
237
+ default=None,
238
+ help=
239
+ "Select the tool call parser depending on the model that you're using."
240
+ " This is used to parse the model-generated tool call into OpenAI API "
241
+ "format. Required for ``--enable-auto-tool-choice``.")
242
+
243
+ parser.add_argument(
244
+ "--tool-parser-plugin",
245
+ type=str,
246
+ default="",
247
+ help=
248
+ "Special the tool parser plugin write to parse the model-generated tool"
249
+ " into OpenAI API format, the name register in this plugin can be used "
250
+ "in ``--tool-call-parser``.")
251
+
252
+ parser = AsyncEngineArgs.add_cli_args(parser)
253
+
254
+ parser.add_argument('--max-log-len',
255
+ type=int,
256
+ default=None,
257
+ help='Max number of prompt characters or prompt '
258
+ 'ID numbers being printed in log.'
259
+ '\n\nDefault: Unlimited')
260
+
261
+ parser.add_argument(
262
+ "--disable-fastapi-docs",
263
+ action='store_true',
264
+ default=False,
265
+ help="Disable FastAPI's OpenAPI schema, Swagger UI, and ReDoc endpoint."
266
+ )
267
+ parser.add_argument(
268
+ "--enable-prompt-tokens-details",
269
+ action='store_true',
270
+ default=False,
271
+ help="If set to True, enable prompt_tokens_details in usage.")
272
+
273
+ return parser
274
+
275
+
276
+ def validate_parsed_serve_args(args: argparse.Namespace):
277
+ """Quick checks for model serve args that raise prior to loading."""
278
+ if hasattr(args, "subparser") and args.subparser != "serve":
279
+ return
280
+
281
+ # Ensure that the chat template is valid; raises if it likely isn't
282
+ validate_chat_template(args.chat_template)
283
+
284
+ # Enable auto tool needs a tool call parser to be valid
285
+ if args.enable_auto_tool_choice and not args.tool_call_parser:
286
+ raise TypeError("Error: --enable-auto-tool-choice requires "
287
+ "--tool-call-parser")
288
+
289
+ # Enable reasoning needs a reasoning parser to be valid
290
+ if args.enable_reasoning and not args.reasoning_parser:
291
+ raise TypeError("Error: --enable-reasoning requires "
292
+ "--reasoning-parser")
293
+
294
+ # Ref https://api-docs.deepseek.com/guides/reasoning_model
295
+ # tool call and reasoning cannot be enabled at the same time.
296
+ if args.enable_auto_tool_choice and args.enable_reasoning:
297
+ raise TypeError(
298
+ "Error: --enable-auto-tool-choice and "
299
+ "--enable-reasoning cannot be enabled at the same time")
300
+
301
+
302
+ def create_parser_for_docs() -> FlexibleArgumentParser:
303
+ parser_for_docs = FlexibleArgumentParser(
304
+ prog="-m vllm.entrypoints.openai.api_server")
305
+ return make_arg_parser(parser_for_docs)
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/logits_processors.py ADDED
@@ -0,0 +1,88 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ from functools import lru_cache, partial
4
+ from typing import Dict, FrozenSet, Iterable, List, Optional, Union
5
+
6
+ import torch
7
+
8
+ from vllm.sampling_params import LogitsProcessor
9
+ from vllm.transformers_utils.tokenizer import AnyTokenizer
10
+
11
+
12
+ class AllowedTokenIdsLogitsProcessor:
13
+ """Logits processor for constraining generated tokens to a
14
+ specific set of token ids."""
15
+
16
+ def __init__(self, allowed_ids: Iterable[int]):
17
+ self.allowed_ids: Optional[List[int]] = list(allowed_ids)
18
+ self.mask: Optional[torch.Tensor] = None
19
+
20
+ def __call__(self, token_ids: List[int],
21
+ logits: torch.Tensor) -> torch.Tensor:
22
+ if self.mask is None:
23
+ self.mask = torch.ones((logits.shape[-1], ),
24
+ dtype=torch.bool,
25
+ device=logits.device)
26
+ self.mask[self.allowed_ids] = False
27
+ self.allowed_ids = None
28
+ logits.masked_fill_(self.mask, float("-inf"))
29
+ return logits
30
+
31
+
32
+ @lru_cache(maxsize=32)
33
+ def _get_allowed_token_ids_logits_processor(
34
+ allowed_token_ids: FrozenSet[int],
35
+ vocab_size: int,
36
+ ) -> LogitsProcessor:
37
+ if not allowed_token_ids:
38
+ raise ValueError("Empty allowed_token_ids provided")
39
+ if not all(0 <= tid < vocab_size for tid in allowed_token_ids):
40
+ raise ValueError("allowed_token_ids contains "
41
+ "out-of-vocab token id")
42
+ return AllowedTokenIdsLogitsProcessor(allowed_token_ids)
43
+
44
+
45
+ def logit_bias_logits_processor(
46
+ logit_bias: Dict[int, float],
47
+ token_ids: List[int],
48
+ logits: torch.Tensor,
49
+ ) -> torch.Tensor:
50
+ for token_id, bias in logit_bias.items():
51
+ logits[token_id] += bias
52
+ return logits
53
+
54
+
55
+ def get_logits_processors(
56
+ logit_bias: Optional[Union[Dict[int, float], Dict[str, float]]],
57
+ allowed_token_ids: Optional[List[int]],
58
+ tokenizer: AnyTokenizer,
59
+ ) -> List[LogitsProcessor]:
60
+ logits_processors: List[LogitsProcessor] = []
61
+ if logit_bias:
62
+ try:
63
+ # Convert token_id to integer
64
+ # Clamp the bias between -100 and 100 per OpenAI API spec
65
+ clamped_logit_bias: Dict[int, float] = {
66
+ int(token_id): min(100.0, max(-100.0, bias))
67
+ for token_id, bias in logit_bias.items()
68
+ }
69
+ except ValueError as exc:
70
+ raise ValueError(
71
+ "Found token_id in logit_bias that is not "
72
+ "an integer or string representing an integer") from exc
73
+
74
+ # Check if token_id is within the vocab size
75
+ for token_id, bias in clamped_logit_bias.items():
76
+ if token_id < 0 or token_id >= len(tokenizer):
77
+ raise ValueError(f"token_id {token_id} in logit_bias contains "
78
+ "out-of-vocab token id")
79
+
80
+ logits_processors.append(
81
+ partial(logit_bias_logits_processor, clamped_logit_bias))
82
+
83
+ if allowed_token_ids is not None:
84
+ logits_processors.append(
85
+ _get_allowed_token_ids_logits_processor(
86
+ frozenset(allowed_token_ids), len(tokenizer)))
87
+
88
+ return logits_processors
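# --- Illustrative sketch (not part of the diff): combining an OpenAI-style
# --- ``logit_bias`` (clamped to [-100, 100]) with an ``allowed_token_ids`` mask
# --- via the helpers above. The tokenizer choice is an assumption.
import torch
from transformers import AutoTokenizer

from vllm.entrypoints.openai.logits_processors import get_logits_processors

tokenizer = AutoTokenizer.from_pretrained("gpt2")  # hypothetical tokenizer
processors = get_logits_processors(
    logit_bias={"50256": -100},      # string keys are converted to int
    allowed_token_ids=[0, 1, 2, 3],  # every other token is masked to -inf
    tokenizer=tokenizer,
)

logits = torch.zeros(len(tokenizer))
for proc in processors:
    logits = proc([], logits)  # (token_ids generated so far, current logits)
print(logits[:5], logits[50256])  # the four allowed ids stay finite, 50256 is -inf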
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/protocol.py ADDED
@@ -0,0 +1,1593 @@
 
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ # Adapted from
4
+ # https://github.com/lm-sys/FastChat/blob/168ccc29d3f7edc50823016105c024fe2282732a/fastchat/protocol/openai_api_protocol.py
5
+ import re
6
+ import time
7
+ from argparse import Namespace
8
+ from typing import Any, ClassVar, Dict, List, Literal, Optional, Set, Union
9
+
10
+ import torch
11
+ from fastapi import UploadFile
12
+ from pydantic import (BaseModel, ConfigDict, Field, TypeAdapter,
13
+ ValidationInfo, field_validator, model_validator)
14
+ from typing_extensions import Annotated, TypeAlias
15
+
16
+ from vllm.entrypoints.chat_utils import ChatCompletionMessageParam
17
+ from vllm.logger import init_logger
18
+ from vllm.pooling_params import PoolingParams
19
+ from vllm.sampling_params import (BeamSearchParams, GuidedDecodingParams,
20
+ RequestOutputKind, SamplingParams)
21
+ from vllm.sequence import Logprob
22
+ from vllm.utils import random_uuid, resolve_obj_by_qualname
23
+
24
+ logger = init_logger(__name__)
25
+
26
+ # torch is mocked during docs generation,
27
+ # so we have to provide the values as literals
28
+ _MOCK_LONG_INFO = Namespace(min=-9223372036854775808, max=9223372036854775807)
29
+ _LONG_INFO: Union["torch.iinfo", Namespace]
30
+
31
+ try:
32
+ from sphinx.ext.autodoc.mock import _MockModule
33
+
34
+ if isinstance(torch, _MockModule):
35
+ _LONG_INFO = _MOCK_LONG_INFO
36
+ else:
37
+ _LONG_INFO = torch.iinfo(torch.long)
38
+ except ModuleNotFoundError:
39
+ _LONG_INFO = torch.iinfo(torch.long)
40
+
41
+ assert _LONG_INFO.min == _MOCK_LONG_INFO.min
42
+ assert _LONG_INFO.max == _MOCK_LONG_INFO.max
43
+
44
+
45
+ class OpenAIBaseModel(BaseModel):
46
+ # OpenAI API does allow extra fields
47
+ model_config = ConfigDict(extra="allow")
48
+
49
+ # Cache class field names
50
+ field_names: ClassVar[Optional[Set[str]]] = None
51
+
52
+ @model_validator(mode="wrap")
53
+ @classmethod
54
+ def __log_extra_fields__(cls, data, handler):
55
+ result = handler(data)
56
+ if not isinstance(data, dict):
57
+ return result
58
+ field_names = cls.field_names
59
+ if field_names is None:
60
+ # Get all class field names and their potential aliases
61
+ field_names = set()
62
+ for field_name, field in cls.model_fields.items():
63
+ field_names.add(field_name)
64
+ if alias := getattr(field, 'alias', None):
65
+ field_names.add(alias)
66
+ cls.field_names = field_names
67
+
68
+ # Compare against both field names and aliases
69
+ if any(k not in field_names for k in data):
70
+ logger.warning(
71
+ "The following fields were present in the request "
72
+ "but ignored: %s",
73
+ data.keys() - field_names)
74
+ return result
75
+
76
+
77
+ class ErrorResponse(OpenAIBaseModel):
78
+ object: str = "error"
79
+ message: str
80
+ type: str
81
+ param: Optional[str] = None
82
+ code: int
83
+
84
+
85
+ class ModelPermission(OpenAIBaseModel):
86
+ id: str = Field(default_factory=lambda: f"modelperm-{random_uuid()}")
87
+ object: str = "model_permission"
88
+ created: int = Field(default_factory=lambda: int(time.time()))
89
+ allow_create_engine: bool = False
90
+ allow_sampling: bool = True
91
+ allow_logprobs: bool = True
92
+ allow_search_indices: bool = False
93
+ allow_view: bool = True
94
+ allow_fine_tuning: bool = False
95
+ organization: str = "*"
96
+ group: Optional[str] = None
97
+ is_blocking: bool = False
98
+
99
+
100
+ class ModelCard(OpenAIBaseModel):
101
+ id: str
102
+ object: str = "model"
103
+ created: int = Field(default_factory=lambda: int(time.time()))
104
+ owned_by: str = "vllm"
105
+ root: Optional[str] = None
106
+ parent: Optional[str] = None
107
+ max_model_len: Optional[int] = None
108
+ permission: List[ModelPermission] = Field(default_factory=list)
109
+
110
+
111
+ class ModelList(OpenAIBaseModel):
112
+ object: str = "list"
113
+ data: List[ModelCard] = Field(default_factory=list)
114
+
115
+
116
+ class PromptTokenUsageInfo(OpenAIBaseModel):
117
+ cached_tokens: Optional[int] = None
118
+
119
+
120
+ class UsageInfo(OpenAIBaseModel):
121
+ prompt_tokens: int = 0
122
+ total_tokens: int = 0
123
+ completion_tokens: Optional[int] = 0
124
+ prompt_tokens_details: Optional[PromptTokenUsageInfo] = None
125
+
126
+
127
+ class RequestResponseMetadata(BaseModel):
128
+ request_id: str
129
+ final_usage_info: Optional[UsageInfo] = None
130
+
131
+
132
+ class JsonSchemaResponseFormat(OpenAIBaseModel):
133
+ name: str
134
+ description: Optional[str] = None
135
+ # schema is the field in openai but that causes conflicts with pydantic so
136
+ # instead use json_schema with an alias
137
+ json_schema: Optional[Dict[str, Any]] = Field(default=None, alias='schema')
138
+ strict: Optional[bool] = None
139
+
140
+
141
+ class ResponseFormat(OpenAIBaseModel):
142
+ # type must be "json_schema", "json_object" or "text"
143
+ type: Literal["text", "json_object", "json_schema"]
144
+ json_schema: Optional[JsonSchemaResponseFormat] = None
145
+
146
+
147
+ class StreamOptions(OpenAIBaseModel):
148
+ include_usage: Optional[bool] = True
149
+ continuous_usage_stats: Optional[bool] = False
150
+
151
+
152
+ class FunctionDefinition(OpenAIBaseModel):
153
+ name: str
154
+ description: Optional[str] = None
155
+ parameters: Optional[Dict[str, Any]] = None
156
+
157
+
158
+ class ChatCompletionToolsParam(OpenAIBaseModel):
159
+ type: Literal["function"] = "function"
160
+ function: FunctionDefinition
161
+
162
+
163
+ class ChatCompletionNamedFunction(OpenAIBaseModel):
164
+ name: str
165
+
166
+
167
+ class ChatCompletionNamedToolChoiceParam(OpenAIBaseModel):
168
+ function: ChatCompletionNamedFunction
169
+ type: Literal["function"] = "function"
170
+
171
+
172
+ class LogitsProcessorConstructor(BaseModel):
173
+ qualname: str
174
+ args: Optional[List[Any]] = None
175
+ kwargs: Optional[Dict[str, Any]] = None
176
+
177
+
178
+ LogitsProcessors = List[Union[str, LogitsProcessorConstructor]]
179
+
180
+
181
+ def get_logits_processors(processors: Optional[LogitsProcessors],
182
+ pattern: Optional[str]) -> Optional[List[Any]]:
183
+ if processors and pattern:
184
+ logits_processors = []
185
+ for processor in processors:
186
+ qualname = processor if isinstance(processor,
187
+ str) else processor.qualname
188
+ if not re.match(pattern, qualname):
189
+ raise ValueError(
190
+ f"Logits processor '{qualname}' is not allowed by this "
191
+ "server. See --logits-processor-pattern engine argument "
192
+ "for more information.")
193
+ try:
194
+ logits_processor = resolve_obj_by_qualname(qualname)
195
+ except Exception as e:
196
+ raise ValueError(
197
+ f"Logits processor '{qualname}' could not be resolved: {e}"
198
+ ) from e
199
+ if isinstance(processor, LogitsProcessorConstructor):
200
+ logits_processor = logits_processor(*processor.args or [],
201
+ **processor.kwargs or {})
202
+ logits_processors.append(logits_processor)
203
+ return logits_processors
204
+ elif processors:
205
+ raise ValueError(
206
+ "The `logits_processors` argument is not supported by this "
207
+ "server. See --logits-processor-pattern engine argugment "
208
+ "for more information.")
209
+ return None
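# --- Illustrative sketch (not part of the diff): the request-level shapes that
# --- get_logits_processors() above resolves. The qualified names are
# --- assumptions; the server only accepts them when they match the
# --- --logits-processor-pattern engine argument.
from vllm.entrypoints.openai.protocol import (LogitsProcessorConstructor,
                                              LogitsProcessors)

example_processors: LogitsProcessors = [
    "my_module.scale_logits",            # bare qualified name
    LogitsProcessorConstructor(          # constructor form with args/kwargs
        qualname="my_module.BiasFactory",
        args=[5],
        kwargs={"strength": 0.5},
    ),
]
# get_logits_processors(example_processors, pattern=r"my_module\..*") would
# import each object via resolve_obj_by_qualname and, for the constructor form,
# call it with the given args/kwargs to build the actual logits processor.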
210
+
211
+
212
+ class ChatCompletionRequest(OpenAIBaseModel):
213
+ # Ordered by official OpenAI API documentation
214
+ # https://platform.openai.com/docs/api-reference/chat/create
215
+ messages: List[ChatCompletionMessageParam]
216
+ model: str
217
+ frequency_penalty: Optional[float] = 0.0
218
+ logit_bias: Optional[Dict[str, float]] = None
219
+ logprobs: Optional[bool] = False
220
+ top_logprobs: Optional[int] = 0
221
+ # TODO(#9845): remove max_tokens when field is removed from OpenAI API
222
+ max_tokens: Optional[int] = Field(
223
+ default=None,
224
+ deprecated=
225
+ 'max_tokens is deprecated in favor of the max_completion_tokens field')
226
+ max_completion_tokens: Optional[int] = None
227
+ n: Optional[int] = 1
228
+ presence_penalty: Optional[float] = 0.0
229
+ response_format: Optional[ResponseFormat] = None
230
+ seed: Optional[int] = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)
231
+ stop: Optional[Union[str, List[str]]] = Field(default_factory=list)
232
+ stream: Optional[bool] = False
233
+ stream_options: Optional[StreamOptions] = None
234
+ temperature: Optional[float] = None
235
+ top_p: Optional[float] = None
236
+ tools: Optional[List[ChatCompletionToolsParam]] = None
237
+ tool_choice: Optional[Union[Literal["none"], Literal["auto"],
238
+ ChatCompletionNamedToolChoiceParam]] = "none"
239
+
240
+ # NOTE this will be ignored by VLLM -- the model determines the behavior
241
+ parallel_tool_calls: Optional[bool] = False
242
+ user: Optional[str] = None
243
+
244
+ # doc: begin-chat-completion-sampling-params
245
+ best_of: Optional[int] = None
246
+ use_beam_search: bool = False
247
+ top_k: Optional[int] = None
248
+ min_p: Optional[float] = None
249
+ repetition_penalty: Optional[float] = None
250
+ length_penalty: float = 1.0
251
+ stop_token_ids: Optional[List[int]] = Field(default_factory=list)
252
+ include_stop_str_in_output: bool = False
253
+ ignore_eos: bool = False
254
+ min_tokens: int = 0
255
+ skip_special_tokens: bool = True
256
+ spaces_between_special_tokens: bool = True
257
+ truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None
258
+ prompt_logprobs: Optional[int] = None
259
+ # doc: end-chat-completion-sampling-params
260
+
261
+ # doc: begin-chat-completion-extra-params
262
+ echo: bool = Field(
263
+ default=False,
264
+ description=(
265
+ "If true, the new message will be prepended with the last message "
266
+ "if they belong to the same role."),
267
+ )
268
+ add_generation_prompt: bool = Field(
269
+ default=True,
270
+ description=
271
+ ("If true, the generation prompt will be added to the chat template. "
272
+ "This is a parameter used by chat template in tokenizer config of the "
273
+ "model."),
274
+ )
275
+ continue_final_message: bool = Field(
276
+ default=False,
277
+ description=
278
+ ("If this is set, the chat will be formatted so that the final "
279
+ "message in the chat is open-ended, without any EOS tokens. The "
280
+ "model will continue this message rather than starting a new one. "
281
+ "This allows you to \"prefill\" part of the model's response for it. "
282
+ "Cannot be used at the same time as `add_generation_prompt`."),
283
+ )
284
+ add_special_tokens: bool = Field(
285
+ default=False,
286
+ description=(
287
+ "If true, special tokens (e.g. BOS) will be added to the prompt "
288
+ "on top of what is added by the chat template. "
289
+ "For most models, the chat template takes care of adding the "
290
+ "special tokens so this should be set to false (as is the "
291
+ "default)."),
292
+ )
293
+ documents: Optional[List[Dict[str, str]]] = Field(
294
+ default=None,
295
+ description=
296
+ ("A list of dicts representing documents that will be accessible to "
297
+ "the model if it is performing RAG (retrieval-augmented generation)."
298
+ " If the template does not support RAG, this argument will have no "
299
+ "effect. We recommend that each document should be a dict containing "
300
+ "\"title\" and \"text\" keys."),
301
+ )
302
+ chat_template: Optional[str] = Field(
303
+ default=None,
304
+ description=(
305
+ "A Jinja template to use for this conversion. "
306
+ "As of transformers v4.44, default chat template is no longer "
307
+ "allowed, so you must provide a chat template if the tokenizer "
308
+ "does not define one."),
309
+ )
310
+ chat_template_kwargs: Optional[Dict[str, Any]] = Field(
311
+ default=None,
312
+ description=("Additional kwargs to pass to the template renderer. "
313
+ "Will be accessible by the chat template."),
314
+ )
315
+ mm_processor_kwargs: Optional[Dict[str, Any]] = Field(
316
+ default=None,
317
+ description=("Additional kwargs to pass to the HF processor."),
318
+ )
319
+ guided_json: Optional[Union[str, dict, BaseModel]] = Field(
320
+ default=None,
321
+ description=("If specified, the output will follow the JSON schema."),
322
+ )
323
+ guided_regex: Optional[str] = Field(
324
+ default=None,
325
+ description=(
326
+ "If specified, the output will follow the regex pattern."),
327
+ )
328
+ guided_choice: Optional[List[str]] = Field(
329
+ default=None,
330
+ description=(
331
+ "If specified, the output will be exactly one of the choices."),
332
+ )
333
+ guided_grammar: Optional[str] = Field(
334
+ default=None,
335
+ description=(
336
+ "If specified, the output will follow the context free grammar."),
337
+ )
338
+ guided_decoding_backend: Optional[str] = Field(
339
+ default=None,
340
+ description=(
341
+ "If specified, will override the default guided decoding backend "
342
+ "of the server for this specific request. If set, must be either "
343
+ "'outlines' / 'lm-format-enforcer'"))
344
+ guided_whitespace_pattern: Optional[str] = Field(
345
+ default=None,
346
+ description=(
347
+ "If specified, will override the default whitespace pattern "
348
+ "for guided json decoding."))
349
+ priority: int = Field(
350
+ default=0,
351
+ description=(
352
+ "The priority of the request (lower means earlier handling; "
353
+ "default: 0). Any priority other than 0 will raise an error "
354
+ "if the served model does not use priority scheduling."))
355
+ request_id: str = Field(
356
+ default_factory=lambda: f"{random_uuid()}",
357
+ description=(
358
+ "The request_id related to this request. If the caller does "
359
+ "not set it, a random_uuid will be generated. This id is used "
360
+ "through out the inference process and return in response."))
361
+ logits_processors: Optional[LogitsProcessors] = Field(
362
+ default=None,
363
+ description=(
364
+ "A list of either qualified names of logits processors, or "
365
+ "constructor objects, to apply when sampling. A constructor is "
366
+ "a JSON object with a required 'qualname' field specifying the "
367
+ "qualified name of the processor class/factory, and optional "
368
+ "'args' and 'kwargs' fields containing positional and keyword "
369
+ "arguments. For example: {'qualname': "
370
+ "'my_module.MyLogitsProcessor', 'args': [1, 2], 'kwargs': "
371
+ "{'param': 'value'}}."))
372
+
373
+ # doc: end-chat-completion-extra-params
374
+
375
+ # Default sampling parameters for chat completion requests
376
+ _DEFAULT_SAMPLING_PARAMS: dict = {
377
+ "repetition_penalty": 1.0,
378
+ "temperature": 1.0,
379
+ "top_p": 1.0,
380
+ "top_k": -1,
381
+ "min_p": 0.0,
382
+ }
383
+
384
+ def to_beam_search_params(
385
+ self,
386
+ default_max_tokens: int,
387
+ default_sampling_params: Optional[dict] = None
388
+ ) -> BeamSearchParams:
389
+ # TODO(#9845): remove max_tokens when field is removed from OpenAI API
390
+ max_tokens = self.max_completion_tokens or self.max_tokens
391
+
392
+ if default_sampling_params is None:
393
+ default_sampling_params = {}
394
+ n = self.n if self.n is not None else 1
395
+
396
+ # Use minimum of context window, user request & server limit.
397
+ max_tokens = min(
398
+ val for val in (default_max_tokens, max_tokens,
399
+ default_sampling_params.get("max_tokens", None))
400
+ if val is not None)
401
+
402
+ if (temperature := self.temperature) is None:
403
+ temperature = default_sampling_params.get(
404
+ "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"])
405
+
406
+ return BeamSearchParams(
407
+ beam_width=n,
408
+ max_tokens=max_tokens,
409
+ ignore_eos=self.ignore_eos,
410
+ temperature=temperature,
411
+ length_penalty=self.length_penalty,
412
+ include_stop_str_in_output=self.include_stop_str_in_output)
413
+
414
+ def to_sampling_params(
415
+ self,
416
+ default_max_tokens: int,
417
+ logits_processor_pattern: Optional[str],
418
+ default_sampling_params: Optional[dict] = None) -> SamplingParams:
419
+ # TODO(#9845): remove max_tokens when field is removed from OpenAI API
420
+ max_tokens = self.max_completion_tokens or self.max_tokens
421
+
422
+ if default_sampling_params is None:
423
+ default_sampling_params = {}
424
+
425
+ # Use minimum of context window, user request & server limit.
426
+ max_tokens = min(
427
+ val for val in (default_max_tokens, max_tokens,
428
+ default_sampling_params.get("max_tokens", None))
429
+ if val is not None)
430
+
431
+ # Default parameters
432
+ if (repetition_penalty := self.repetition_penalty) is None:
433
+ repetition_penalty = default_sampling_params.get(
434
+ "repetition_penalty",
435
+ self._DEFAULT_SAMPLING_PARAMS["repetition_penalty"],
436
+ )
437
+ if (temperature := self.temperature) is None:
438
+ temperature = default_sampling_params.get(
439
+ "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"])
440
+ if (top_p := self.top_p) is None:
441
+ top_p = default_sampling_params.get(
442
+ "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"])
443
+ if (top_k := self.top_k) is None:
444
+ top_k = default_sampling_params.get(
445
+ "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"])
446
+ if (min_p := self.min_p) is None:
447
+ min_p = default_sampling_params.get(
448
+ "min_p", self._DEFAULT_SAMPLING_PARAMS["min_p"])
449
+
450
+ prompt_logprobs = self.prompt_logprobs
451
+ if prompt_logprobs is None and self.echo:
452
+ prompt_logprobs = self.top_logprobs
453
+
454
+ guided_json_object = None
455
+ if self.response_format is not None:
456
+ if self.response_format.type == "json_object":
457
+ guided_json_object = True
458
+ elif self.response_format.type == "json_schema":
459
+ json_schema = self.response_format.json_schema
460
+ assert json_schema is not None
461
+ self.guided_json = json_schema.json_schema
462
+ if self.guided_decoding_backend is None:
463
+ self.guided_decoding_backend = "xgrammar"
464
+
465
+ guided_decoding = GuidedDecodingParams.from_optional(
466
+ json=self._get_guided_json_from_tool() or self.guided_json,
467
+ regex=self.guided_regex,
468
+ choice=self.guided_choice,
469
+ grammar=self.guided_grammar,
470
+ json_object=guided_json_object,
471
+ backend=self.guided_decoding_backend,
472
+ whitespace_pattern=self.guided_whitespace_pattern)
473
+
474
+ return SamplingParams.from_optional(
475
+ n=self.n,
476
+ best_of=self.best_of,
477
+ presence_penalty=self.presence_penalty,
478
+ frequency_penalty=self.frequency_penalty,
479
+ repetition_penalty=repetition_penalty,
480
+ temperature=temperature,
481
+ top_p=top_p,
482
+ top_k=top_k,
483
+ min_p=min_p,
484
+ seed=self.seed,
485
+ stop=self.stop,
486
+ stop_token_ids=self.stop_token_ids,
487
+ logprobs=self.top_logprobs if self.logprobs else None,
488
+ prompt_logprobs=prompt_logprobs,
489
+ ignore_eos=self.ignore_eos,
490
+ max_tokens=max_tokens,
491
+ min_tokens=self.min_tokens,
492
+ skip_special_tokens=self.skip_special_tokens,
493
+ spaces_between_special_tokens=self.spaces_between_special_tokens,
494
+ logits_processors=get_logits_processors(self.logits_processors,
495
+ logits_processor_pattern),
496
+ include_stop_str_in_output=self.include_stop_str_in_output,
497
+ truncate_prompt_tokens=self.truncate_prompt_tokens,
498
+ output_kind=RequestOutputKind.DELTA if self.stream \
499
+ else RequestOutputKind.FINAL_ONLY,
500
+ guided_decoding=guided_decoding,
501
+ logit_bias=self.logit_bias)
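# --- Illustrative sketch (not part of the diff): how max_tokens is resolved in
# --- to_sampling_params()/to_beam_search_params(). The numbers are assumptions.
default_max_tokens = 4096  # room left in the model's context window
requested = 512            # max_completion_tokens (or legacy max_tokens)
server_default = 1024      # default_sampling_params.get("max_tokens")

# The tightest of context window, user request and server default wins;
# None values are simply skipped by the generator expression.
resolved = min(v for v in (default_max_tokens, requested, server_default)
               if v is not None)
assert resolved == 512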
502
+
503
+ def _get_guided_json_from_tool(
504
+ self) -> Optional[Union[str, dict, BaseModel]]:
505
+ # user has chosen to not use any tool
506
+ if self.tool_choice == "none" or self.tools is None:
507
+ return None
508
+
509
+ # user has chosen to use a named tool
510
+ if type(self.tool_choice) is ChatCompletionNamedToolChoiceParam:
511
+ tool_name = self.tool_choice.function.name
512
+ tools = {tool.function.name: tool.function for tool in self.tools}
513
+ if tool_name not in tools:
514
+ raise ValueError(
515
+ f"Tool '{tool_name}' has not been passed in `tools`.")
516
+ tool = tools[tool_name]
517
+ return tool.parameters
518
+
519
+ return None
520
+
521
+ @model_validator(mode="before")
522
+ @classmethod
523
+ def validate_stream_options(cls, data):
524
+ if data.get("stream_options") and not data.get("stream"):
525
+ raise ValueError(
526
+ "Stream options can only be defined when `stream=True`.")
527
+
528
+ return data
529
+
530
+ @model_validator(mode="before")
531
+ @classmethod
532
+ def check_logprobs(cls, data):
533
+ if (prompt_logprobs := data.get("prompt_logprobs")) is not None:
534
+ if data.get("stream") and prompt_logprobs > 0:
535
+ raise ValueError(
536
+ "`prompt_logprobs` are not available when `stream=True`.")
537
+
538
+ if prompt_logprobs < 0:
539
+ raise ValueError("`prompt_logprobs` must be a positive value.")
540
+
541
+ if (top_logprobs := data.get("top_logprobs")) is not None:
542
+ if top_logprobs < 0:
543
+ raise ValueError("`top_logprobs` must be a positive value.")
544
+
545
+ if not data.get("logprobs"):
546
+ raise ValueError(
547
+ "when using `top_logprobs`, `logprobs` must be set to true."
548
+ )
549
+
550
+ return data
551
+
552
+ @model_validator(mode="before")
553
+ @classmethod
554
+ def check_guided_decoding_count(cls, data):
555
+ if isinstance(data, ValueError):
556
+ raise data
557
+
558
+ guide_count = sum([
559
+ "guided_json" in data and data["guided_json"] is not None,
560
+ "guided_regex" in data and data["guided_regex"] is not None,
561
+ "guided_choice" in data and data["guided_choice"] is not None
562
+ ])
563
+ # you can only use one kind of guided decoding
564
+ if guide_count > 1:
565
+ raise ValueError(
566
+ "You can only use one kind of guided decoding "
567
+ "('guided_json', 'guided_regex' or 'guided_choice').")
568
+ # you can only either use guided decoding or tools, not both
569
+ if guide_count > 1 and data.get("tool_choice",
570
+ "none") not in ("none", "auto"):
571
+ raise ValueError(
572
+ "You can only either use guided decoding or tools, not both.")
573
+ return data
574
+
575
+ @model_validator(mode="before")
576
+ @classmethod
577
+ def check_tool_usage(cls, data):
578
+
579
+ # if "tool_choice" is not specified but tools are provided,
580
+ # default to "auto" tool_choice
581
+ if "tool_choice" not in data and data.get("tools"):
582
+ data["tool_choice"] = "auto"
583
+
584
+ # if "tool_choice" is "none" -- ignore tools if present
585
+ if "tool_choice" in data and data["tool_choice"] == "none":
586
+ # ensure that no tools are present
587
+ data.pop("tools", None)
588
+ return data
589
+
590
+ # if "tool_choice" is specified -- validation
591
+ if "tool_choice" in data:
592
+
593
+ # ensure that if "tool choice" is specified, tools are present
594
+ if "tools" not in data or data["tools"] is None:
595
+ raise ValueError(
596
+ "When using `tool_choice`, `tools` must be set.")
597
+
598
+ # make sure that tool choice is either a named tool
599
+ # OR that it's set to "auto"
600
+ if data["tool_choice"] != "auto" and not isinstance(
601
+ data["tool_choice"], dict):
602
+ raise ValueError(
603
+ "`tool_choice` must either be a named tool, \"auto\", "
604
+ "or \"none\".")
605
+
606
+ # ensure that if "tool_choice" is specified as an object,
607
+ # it matches a valid tool
608
+ if isinstance(data["tool_choice"], dict):
609
+ valid_tool = False
610
+ specified_function = data["tool_choice"].get("function")
611
+ if not specified_function:
612
+ raise ValueError(
613
+ "Expected field `function` in `tool_choice`."
614
+ " Correct usage: `{\"type\": \"function\","
615
+ " \"function\": {\"name\": \"my_function\"}}`")
616
+ specified_function_name = specified_function.get("name")
617
+ if not specified_function_name:
618
+ raise ValueError(
619
+ "Expected field `name` in `function` in `tool_choice`."
620
+ "Correct usage: `{\"type\": \"function\", "
621
+ "\"function\": {\"name\": \"my_function\"}}`")
622
+ for tool in data["tools"]:
623
+ if tool["function"]["name"] == specified_function_name:
624
+ valid_tool = True
625
+ break
626
+ if not valid_tool:
627
+ raise ValueError(
628
+ "The tool specified in `tool_choice` does not match any"
629
+ " of the specified `tools`")
630
+ return data
631
+
632
+ @model_validator(mode="before")
633
+ @classmethod
634
+ def check_generation_prompt(cls, data):
635
+ if data.get("continue_final_message") and data.get(
636
+ "add_generation_prompt"):
637
+ raise ValueError("Cannot set both `continue_final_message` and "
638
+ "`add_generation_prompt` to True.")
639
+ return data
640
+
641
+
642
+ class CompletionRequest(OpenAIBaseModel):
643
+ # Ordered by official OpenAI API documentation
644
+ # https://platform.openai.com/docs/api-reference/completions/create
645
+ model: str
646
+ prompt: Union[List[int], List[List[int]], str, List[str]]
647
+ best_of: Optional[int] = None
648
+ echo: Optional[bool] = False
649
+ frequency_penalty: Optional[float] = 0.0
650
+ logit_bias: Optional[Dict[str, float]] = None
651
+ logprobs: Optional[int] = None
652
+ max_tokens: Optional[int] = 16
653
+ n: int = 1
654
+ presence_penalty: Optional[float] = 0.0
655
+ seed: Optional[int] = Field(None, ge=_LONG_INFO.min, le=_LONG_INFO.max)
656
+ stop: Optional[Union[str, List[str]]] = Field(default_factory=list)
657
+ stream: Optional[bool] = False
658
+ stream_options: Optional[StreamOptions] = None
659
+ suffix: Optional[str] = None
660
+ temperature: Optional[float] = None
661
+ top_p: Optional[float] = None
662
+ user: Optional[str] = None
663
+
664
+ # doc: begin-completion-sampling-params
665
+ use_beam_search: bool = False
666
+ top_k: Optional[int] = None
667
+ min_p: Optional[float] = None
668
+ repetition_penalty: Optional[float] = None
669
+ length_penalty: float = 1.0
670
+ stop_token_ids: Optional[List[int]] = Field(default_factory=list)
671
+ include_stop_str_in_output: bool = False
672
+ ignore_eos: bool = False
673
+ min_tokens: int = 0
674
+ skip_special_tokens: bool = True
675
+ spaces_between_special_tokens: bool = True
676
+ truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None
677
+ allowed_token_ids: Optional[List[int]] = None
678
+ prompt_logprobs: Optional[int] = None
679
+ # doc: end-completion-sampling-params
680
+
681
+ # doc: begin-completion-extra-params
682
+ add_special_tokens: bool = Field(
683
+ default=True,
684
+ description=(
685
+ "If true (the default), special tokens (e.g. BOS) will be added to "
686
+ "the prompt."),
687
+ )
688
+ response_format: Optional[ResponseFormat] = Field(
689
+ default=None,
690
+ description=
691
+ ("Similar to chat completion, this parameter specifies the format of "
692
+ "output. Only {'type': 'json_object'}, {'type': 'json_schema'} or "
693
+ "{'type': 'text' } is supported."),
694
+ )
695
+ guided_json: Optional[Union[str, dict, BaseModel]] = Field(
696
+ default=None,
697
+ description="If specified, the output will follow the JSON schema.",
698
+ )
699
+ guided_regex: Optional[str] = Field(
700
+ default=None,
701
+ description=(
702
+ "If specified, the output will follow the regex pattern."),
703
+ )
704
+ guided_choice: Optional[List[str]] = Field(
705
+ default=None,
706
+ description=(
707
+ "If specified, the output will be exactly one of the choices."),
708
+ )
709
+ guided_grammar: Optional[str] = Field(
710
+ default=None,
711
+ description=(
712
+ "If specified, the output will follow the context free grammar."),
713
+ )
714
+ guided_decoding_backend: Optional[str] = Field(
715
+ default=None,
716
+ description=(
717
+ "If specified, will override the default guided decoding backend "
718
+ "of the server for this specific request. If set, must be one of "
719
+ "'outlines' / 'lm-format-enforcer'"))
720
+ guided_whitespace_pattern: Optional[str] = Field(
721
+ default=None,
722
+ description=(
723
+ "If specified, will override the default whitespace pattern "
724
+ "for guided json decoding."))
725
+ priority: int = Field(
726
+ default=0,
727
+ description=(
728
+ "The priority of the request (lower means earlier handling; "
729
+ "default: 0). Any priority other than 0 will raise an error "
730
+ "if the served model does not use priority scheduling."))
731
+ logits_processors: Optional[LogitsProcessors] = Field(
732
+ default=None,
733
+ description=(
734
+ "A list of either qualified names of logits processors, or "
735
+ "constructor objects, to apply when sampling. A constructor is "
736
+ "a JSON object with a required 'qualname' field specifying the "
737
+ "qualified name of the processor class/factory, and optional "
738
+ "'args' and 'kwargs' fields containing positional and keyword "
739
+ "arguments. For example: {'qualname': "
740
+ "'my_module.MyLogitsProcessor', 'args': [1, 2], 'kwargs': "
741
+ "{'param': 'value'}}."))
742
+
743
+ # doc: end-completion-extra-params
744
+
745
+ # Default sampling parameters for completion requests
746
+ _DEFAULT_SAMPLING_PARAMS: dict = {
747
+ "repetition_penalty": 1.0,
748
+ "temperature": 1.0,
749
+ "top_p": 1.0,
750
+ "top_k": -1,
751
+ "min_p": 0.0,
752
+ }
753
+
754
+ def to_beam_search_params(
755
+ self,
756
+ default_max_tokens: int,
757
+ default_sampling_params: Optional[dict] = None
758
+ ) -> BeamSearchParams:
759
+ max_tokens = self.max_tokens
760
+
761
+ if default_sampling_params is None:
762
+ default_sampling_params = {}
763
+ n = self.n if self.n is not None else 1
764
+
765
+ # Use minimum of context window, user request & server limit.
766
+ max_tokens = min(
767
+ val for val in (default_max_tokens, max_tokens,
768
+ default_sampling_params.get("max_tokens", None))
769
+ if val is not None)
770
+
771
+ if (temperature := self.temperature) is None:
772
+ temperature = default_sampling_params.get("temperature", 1.0)
773
+
774
+ return BeamSearchParams(
775
+ beam_width=n,
776
+ max_tokens=max_tokens,
777
+ ignore_eos=self.ignore_eos,
778
+ temperature=temperature,
779
+ length_penalty=self.length_penalty,
780
+ include_stop_str_in_output=self.include_stop_str_in_output)
781
+
782
+ def to_sampling_params(
783
+ self,
784
+ default_max_tokens: int,
785
+ logits_processor_pattern: Optional[str],
786
+ default_sampling_params: Optional[dict] = None) -> SamplingParams:
787
+ max_tokens = self.max_tokens
788
+
789
+ if default_sampling_params is None:
790
+ default_sampling_params = {}
791
+
792
+ # Use minimum of context window, user request & server limit.
793
+ max_tokens = min(
794
+ val for val in (default_max_tokens, max_tokens,
795
+ default_sampling_params.get("max_tokens", None))
796
+ if val is not None)
797
+
798
+ # Default parameters
799
+ if (repetition_penalty := self.repetition_penalty) is None:
800
+ repetition_penalty = default_sampling_params.get(
801
+ "repetition_penalty",
802
+ self._DEFAULT_SAMPLING_PARAMS["repetition_penalty"],
803
+ )
804
+ if (temperature := self.temperature) is None:
805
+ temperature = default_sampling_params.get(
806
+ "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"])
807
+ if (top_p := self.top_p) is None:
808
+ top_p = default_sampling_params.get(
809
+ "top_p", self._DEFAULT_SAMPLING_PARAMS["top_p"])
810
+ if (top_k := self.top_k) is None:
811
+ top_k = default_sampling_params.get(
812
+ "top_k", self._DEFAULT_SAMPLING_PARAMS["top_k"])
813
+ if (min_p := self.min_p) is None:
814
+ min_p = default_sampling_params.get(
815
+ "min_p", self._DEFAULT_SAMPLING_PARAMS["min_p"])
816
+
817
+ prompt_logprobs = self.prompt_logprobs
818
+ if prompt_logprobs is None and self.echo:
819
+ prompt_logprobs = self.logprobs
820
+
821
+ echo_without_generation = self.echo and self.max_tokens == 0
822
+
823
+ guided_json_object = None
824
+ if (self.response_format is not None
825
+ and self.response_format.type == "json_object"):
826
+ guided_json_object = True
827
+
828
+ guided_decoding = GuidedDecodingParams.from_optional(
829
+ json=self.guided_json,
830
+ regex=self.guided_regex,
831
+ choice=self.guided_choice,
832
+ grammar=self.guided_grammar,
833
+ json_object=guided_json_object,
834
+ backend=self.guided_decoding_backend,
835
+ whitespace_pattern=self.guided_whitespace_pattern)
836
+
837
+ return SamplingParams.from_optional(
838
+ n=self.n,
839
+ best_of=self.best_of,
840
+ presence_penalty=self.presence_penalty,
841
+ frequency_penalty=self.frequency_penalty,
842
+ repetition_penalty=repetition_penalty,
843
+ temperature=temperature,
844
+ top_p=top_p,
845
+ top_k=top_k,
846
+ min_p=min_p,
847
+ seed=self.seed,
848
+ stop=self.stop,
849
+ stop_token_ids=self.stop_token_ids,
850
+ logprobs=self.logprobs,
851
+ ignore_eos=self.ignore_eos,
852
+ max_tokens=max_tokens if not echo_without_generation else 1,
853
+ min_tokens=self.min_tokens,
854
+ prompt_logprobs=prompt_logprobs,
855
+ skip_special_tokens=self.skip_special_tokens,
856
+ spaces_between_special_tokens=self.spaces_between_special_tokens,
857
+ include_stop_str_in_output=self.include_stop_str_in_output,
858
+ logits_processors=get_logits_processors(self.logits_processors,
859
+ logits_processor_pattern),
860
+ truncate_prompt_tokens=self.truncate_prompt_tokens,
861
+ output_kind=RequestOutputKind.DELTA if self.stream \
862
+ else RequestOutputKind.FINAL_ONLY,
863
+ guided_decoding=guided_decoding,
864
+ logit_bias=self.logit_bias,
865
+ allowed_token_ids=self.allowed_token_ids)
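
The resolution order used above is: value on the request, then the server-supplied `default_sampling_params`, then the class-level `_DEFAULT_SAMPLING_PARAMS`; `max_tokens` takes the minimum of the candidates that are not None. A small sketch of that fallback logic with illustrative numbers:

# Sketch of the fallback order (values are illustrative, not defaults of any model).
request_temperature = None                  # not set on the request
server_defaults = {"temperature": 0.7}      # server-side default sampling params
builtin_defaults = {"temperature": 1.0}     # mirrors _DEFAULT_SAMPLING_PARAMS

temperature = (request_temperature
               if request_temperature is not None
               else server_defaults.get("temperature",
                                        builtin_defaults["temperature"]))

# max_tokens: minimum of the candidates that are not None
default_max_tokens, requested_max_tokens = 4096, 16
max_tokens = min(v for v in (default_max_tokens, requested_max_tokens,
                             server_defaults.get("max_tokens"))
                 if v is not None)

assert (temperature, max_tokens) == (0.7, 16)
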
866
+
867
+ @model_validator(mode="before")
868
+ @classmethod
869
+ def check_guided_decoding_count(cls, data):
870
+ guide_count = sum([
871
+ "guided_json" in data and data["guided_json"] is not None,
872
+ "guided_regex" in data and data["guided_regex"] is not None,
873
+ "guided_choice" in data and data["guided_choice"] is not None
874
+ ])
875
+ if guide_count > 1:
876
+ raise ValueError(
877
+ "You can only use one kind of guided decoding "
878
+ "('guided_json', 'guided_regex' or 'guided_choice').")
879
+ return data
880
+
881
+ @model_validator(mode="before")
882
+ @classmethod
883
+ def check_logprobs(cls, data):
884
+ if (prompt_logprobs := data.get("prompt_logprobs")) is not None:
885
+ if data.get("stream") and prompt_logprobs > 0:
886
+ raise ValueError(
887
+ "`prompt_logprobs` are not available when `stream=True`.")
888
+
889
+ if prompt_logprobs < 0:
890
+ raise ValueError("`prompt_logprobs` must be a positive value.")
891
+
892
+ if (logprobs := data.get("logprobs")) is not None and logprobs < 0:
893
+ raise ValueError("`logprobs` must be a positive value.")
894
+
895
+ return data
896
+
897
+ @model_validator(mode="before")
898
+ @classmethod
899
+ def validate_stream_options(cls, data):
900
+ if data.get("stream_options") and not data.get("stream"):
901
+ raise ValueError(
902
+ "Stream options can only be defined when `stream=True`.")
903
+
904
+ return data
905
+
906
+
907
+ class EmbeddingCompletionRequest(OpenAIBaseModel):
908
+ # Ordered by official OpenAI API documentation
909
+ # https://platform.openai.com/docs/api-reference/embeddings
910
+ model: str
911
+ input: Union[List[int], List[List[int]], str, List[str]]
912
+ encoding_format: Literal["float", "base64"] = "float"
913
+ dimensions: Optional[int] = None
914
+ user: Optional[str] = None
915
+ truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None
916
+
917
+ # doc: begin-embedding-pooling-params
918
+ additional_data: Optional[Any] = None
919
+ # doc: end-embedding-pooling-params
920
+
921
+ # doc: begin-embedding-extra-params
922
+ add_special_tokens: bool = Field(
923
+ default=True,
924
+ description=(
925
+ "If true (the default), special tokens (e.g. BOS) will be added to "
926
+ "the prompt."),
927
+ )
928
+ priority: int = Field(
929
+ default=0,
930
+ description=(
931
+ "The priority of the request (lower means earlier handling; "
932
+ "default: 0). Any priority other than 0 will raise an error "
933
+ "if the served model does not use priority scheduling."))
934
+
935
+ # doc: end-embedding-extra-params
936
+
937
+ def to_pooling_params(self):
938
+ return PoolingParams(additional_data=self.additional_data)
939
+
940
+
941
+ class EmbeddingChatRequest(OpenAIBaseModel):
942
+ model: str
943
+ messages: List[ChatCompletionMessageParam]
944
+
945
+ encoding_format: Literal["float", "base64"] = "float"
946
+ dimensions: Optional[int] = None
947
+ user: Optional[str] = None
948
+ truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None
949
+
950
+ # doc: begin-chat-embedding-pooling-params
951
+ additional_data: Optional[Any] = None
952
+ # doc: end-chat-embedding-pooling-params
953
+
954
+ # doc: begin-chat-embedding-extra-params
955
+ add_special_tokens: bool = Field(
956
+ default=False,
957
+ description=(
958
+ "If true, special tokens (e.g. BOS) will be added to the prompt "
959
+ "on top of what is added by the chat template. "
960
+ "For most models, the chat template takes care of adding the "
961
+ "special tokens so this should be set to false (as is the "
962
+ "default)."),
963
+ )
964
+ chat_template: Optional[str] = Field(
965
+ default=None,
966
+ description=(
967
+ "A Jinja template to use for this conversion. "
968
+ "As of transformers v4.44, default chat template is no longer "
969
+ "allowed, so you must provide a chat template if the tokenizer "
970
+ "does not define one."),
971
+ )
972
+ chat_template_kwargs: Optional[Dict[str, Any]] = Field(
973
+ default=None,
974
+ description=("Additional kwargs to pass to the template renderer. "
975
+ "Will be accessible by the chat template."),
976
+ )
977
+ priority: int = Field(
978
+ default=0,
979
+ description=(
980
+ "The priority of the request (lower means earlier handling; "
981
+ "default: 0). Any priority other than 0 will raise an error "
982
+ "if the served model does not use priority scheduling."))
983
+ # doc: end-chat-embedding-extra-params
984
+
985
+ @model_validator(mode="before")
986
+ @classmethod
987
+ def check_generation_prompt(cls, data):
988
+ if data.get("continue_final_message") and data.get(
989
+ "add_generation_prompt"):
990
+ raise ValueError("Cannot set both `continue_final_message` and "
991
+ "`add_generation_prompt` to True.")
992
+ return data
993
+
994
+ def to_pooling_params(self):
995
+ return PoolingParams(additional_data=self.additional_data)
996
+
997
+
998
+ EmbeddingRequest = Union[EmbeddingCompletionRequest, EmbeddingChatRequest]
999
+
1000
+ PoolingCompletionRequest = EmbeddingCompletionRequest
1001
+ PoolingChatRequest = EmbeddingChatRequest
1002
+ PoolingRequest = Union[PoolingCompletionRequest, PoolingChatRequest]
1003
+
1004
+
1005
+ class ScoreRequest(OpenAIBaseModel):
1006
+ model: str
1007
+ text_1: Union[List[str], str]
1008
+ text_2: Union[List[str], str]
1009
+ truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None
1010
+
1011
+ # doc: begin-score-pooling-params
1012
+ additional_data: Optional[Any] = None
1013
+ # doc: end-score-pooling-params
1014
+
1015
+ # doc: begin-score-extra-params
1016
+ priority: int = Field(
1017
+ default=0,
1018
+ description=(
1019
+ "The priority of the request (lower means earlier handling; "
1020
+ "default: 0). Any priority other than 0 will raise an error "
1021
+ "if the served model does not use priority scheduling."))
1022
+
1023
+ # doc: end-score-extra-params
1024
+
1025
+ def to_pooling_params(self):
1026
+ return PoolingParams(additional_data=self.additional_data)
1027
+
1028
+
1029
+ class RerankRequest(OpenAIBaseModel):
1030
+ model: str
1031
+ query: str
1032
+ documents: List[str]
1033
+ top_n: int = Field(default_factory=lambda: 0)
1034
+ truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None
1035
+
1036
+ # doc: begin-rerank-pooling-params
1037
+ additional_data: Optional[Any] = None
1038
+ # doc: end-rerank-pooling-params
1039
+
1040
+ # doc: begin-rerank-extra-params
1041
+ priority: int = Field(
1042
+ default=0,
1043
+ description=(
1044
+ "The priority of the request (lower means earlier handling; "
1045
+ "default: 0). Any priority other than 0 will raise an error "
1046
+ "if the served model does not use priority scheduling."))
1047
+
1048
+ # doc: end-rerank-extra-params
1049
+
1050
+ def to_pooling_params(self):
1051
+ return PoolingParams(additional_data=self.additional_data)
1052
+
1053
+
1054
+ class RerankDocument(BaseModel):
1055
+ text: str
1056
+
1057
+
1058
+ class RerankResult(BaseModel):
1059
+ index: int
1060
+ document: RerankDocument
1061
+ relevance_score: float
1062
+
1063
+
1064
+ class RerankUsage(BaseModel):
1065
+ total_tokens: int
1066
+
1067
+
1068
+ class RerankResponse(OpenAIBaseModel):
1069
+ id: str
1070
+ model: str
1071
+ usage: RerankUsage
1072
+ results: List[RerankResult]
1073
+
1074
+
1075
+ class CompletionLogProbs(OpenAIBaseModel):
1076
+ text_offset: List[int] = Field(default_factory=list)
1077
+ token_logprobs: List[Optional[float]] = Field(default_factory=list)
1078
+ tokens: List[str] = Field(default_factory=list)
1079
+ top_logprobs: List[Optional[Dict[str,
1080
+ float]]] = Field(default_factory=list)
1081
+
1082
+
1083
+ class CompletionResponseChoice(OpenAIBaseModel):
1084
+ index: int
1085
+ text: str
1086
+ logprobs: Optional[CompletionLogProbs] = None
1087
+ finish_reason: Optional[str] = None
1088
+ stop_reason: Optional[Union[int, str]] = Field(
1089
+ default=None,
1090
+ description=(
1091
+ "The stop string or token id that caused the completion "
1092
+ "to stop, None if the completion finished for some other reason "
1093
+ "including encountering the EOS token"),
1094
+ )
1095
+ prompt_logprobs: Optional[List[Optional[Dict[int, Logprob]]]] = None
1096
+
1097
+
1098
+ class CompletionResponse(OpenAIBaseModel):
1099
+ id: str = Field(default_factory=lambda: f"cmpl-{random_uuid()}")
1100
+ object: str = "text_completion"
1101
+ created: int = Field(default_factory=lambda: int(time.time()))
1102
+ model: str
1103
+ choices: List[CompletionResponseChoice]
1104
+ usage: UsageInfo
1105
+
1106
+
1107
+ class CompletionResponseStreamChoice(OpenAIBaseModel):
1108
+ index: int
1109
+ text: str
1110
+ logprobs: Optional[CompletionLogProbs] = None
1111
+ finish_reason: Optional[str] = None
1112
+ stop_reason: Optional[Union[int, str]] = Field(
1113
+ default=None,
1114
+ description=(
1115
+ "The stop string or token id that caused the completion "
1116
+ "to stop, None if the completion finished for some other reason "
1117
+ "including encountering the EOS token"),
1118
+ )
1119
+
1120
+
1121
+ class CompletionStreamResponse(OpenAIBaseModel):
1122
+ id: str = Field(default_factory=lambda: f"cmpl-{random_uuid()}")
1123
+ object: str = "text_completion"
1124
+ created: int = Field(default_factory=lambda: int(time.time()))
1125
+ model: str
1126
+ choices: List[CompletionResponseStreamChoice]
1127
+ usage: Optional[UsageInfo] = Field(default=None)
1128
+
1129
+
1130
+ class EmbeddingResponseData(OpenAIBaseModel):
1131
+ index: int
1132
+ object: str = "embedding"
1133
+ embedding: Union[List[float], str]
1134
+
1135
+
1136
+ class EmbeddingResponse(OpenAIBaseModel):
1137
+ id: str = Field(default_factory=lambda: f"embd-{random_uuid()}")
1138
+ object: str = "list"
1139
+ created: int = Field(default_factory=lambda: int(time.time()))
1140
+ model: str
1141
+ data: List[EmbeddingResponseData]
1142
+ usage: UsageInfo
1143
+
1144
+
1145
+ class PoolingResponseData(OpenAIBaseModel):
1146
+ index: int
1147
+ object: str = "pooling"
1148
+ data: Union[List[List[float]], List[float], str]
1149
+
1150
+
1151
+ class PoolingResponse(OpenAIBaseModel):
1152
+ id: str = Field(default_factory=lambda: f"pool-{random_uuid()}")
1153
+ object: str = "list"
1154
+ created: int = Field(default_factory=lambda: int(time.time()))
1155
+ model: str
1156
+ data: List[PoolingResponseData]
1157
+ usage: UsageInfo
1158
+
1159
+
1160
+ class ScoreResponseData(OpenAIBaseModel):
1161
+ index: int
1162
+ object: str = "score"
1163
+ score: float
1164
+
1165
+
1166
+ class ScoreResponse(OpenAIBaseModel):
1167
+ id: str = Field(default_factory=lambda: f"embd-{random_uuid()}")
1168
+ object: str = "list"
1169
+ created: int = Field(default_factory=lambda: int(time.time()))
1170
+ model: str
1171
+ data: List[ScoreResponseData]
1172
+ usage: UsageInfo
1173
+
1174
+
1175
+ class FunctionCall(OpenAIBaseModel):
1176
+ name: str
1177
+ arguments: str
1178
+
1179
+
1180
+ class ToolCall(OpenAIBaseModel):
1181
+ id: str = Field(default_factory=lambda: f"chatcmpl-tool-{random_uuid()}")
1182
+ type: Literal["function"] = "function"
1183
+ function: FunctionCall
1184
+
1185
+
1186
+ class DeltaFunctionCall(BaseModel):
1187
+ name: Optional[str] = None
1188
+ arguments: Optional[str] = None
1189
+
1190
+
1191
+ # a tool call delta where everything is optional
1192
+ class DeltaToolCall(OpenAIBaseModel):
1193
+ id: str = Field(default_factory=lambda: f"chatcmpl-tool-{random_uuid()}")
1194
+ type: Literal["function"] = "function"
1195
+ index: int
1196
+ function: Optional[DeltaFunctionCall] = None
1197
+
1198
+
1199
+ class ExtractedToolCallInformation(BaseModel):
1200
+ # indicate if tools were called
1201
+ tools_called: bool
1202
+
1203
+ # extracted tool calls
1204
+ tool_calls: List[ToolCall]
1205
+
1206
+ # content - per OpenAI spec, content AND tool calls can be returned rarely
1207
+ # But some models will do this intentionally
1208
+ content: Optional[str] = None
1209
+
1210
+
1211
+ class ChatMessage(OpenAIBaseModel):
1212
+ role: str
1213
+ reasoning_content: Optional[str] = None
1214
+ content: Optional[str] = None
1215
+ tool_calls: List[ToolCall] = Field(default_factory=list)
1216
+
1217
+
1218
+ class ChatCompletionLogProb(OpenAIBaseModel):
1219
+ token: str
1220
+ logprob: float = -9999.0
1221
+ bytes: Optional[List[int]] = None
1222
+
1223
+
1224
+ class ChatCompletionLogProbsContent(ChatCompletionLogProb):
1225
+ top_logprobs: List[ChatCompletionLogProb] = Field(default_factory=list)
1226
+
1227
+
1228
+ class ChatCompletionLogProbs(OpenAIBaseModel):
1229
+ content: Optional[List[ChatCompletionLogProbsContent]] = None
1230
+
1231
+
1232
+ class ChatCompletionResponseChoice(OpenAIBaseModel):
1233
+ index: int
1234
+ message: ChatMessage
1235
+ logprobs: Optional[ChatCompletionLogProbs] = None
1236
+ # per OpenAI spec this is the default
1237
+ finish_reason: Optional[str] = "stop"
1238
+ # not part of the OpenAI spec but included in vLLM for legacy reasons
1239
+ stop_reason: Optional[Union[int, str]] = None
1240
+
1241
+
1242
+ class ChatCompletionResponse(OpenAIBaseModel):
1243
+ id: str = Field(default_factory=lambda: f"chatcmpl-{random_uuid()}")
1244
+ object: Literal["chat.completion"] = "chat.completion"
1245
+ created: int = Field(default_factory=lambda: int(time.time()))
1246
+ model: str
1247
+ choices: List[ChatCompletionResponseChoice]
1248
+ usage: UsageInfo
1249
+ prompt_logprobs: Optional[List[Optional[Dict[int, Logprob]]]] = None
1250
+
1251
+
1252
+ class DeltaMessage(OpenAIBaseModel):
1253
+ role: Optional[str] = None
1254
+ content: Optional[str] = None
1255
+ reasoning_content: Optional[str] = None
1256
+ tool_calls: List[DeltaToolCall] = Field(default_factory=list)
1257
+
1258
+
1259
+ class ChatCompletionResponseStreamChoice(OpenAIBaseModel):
1260
+ index: int
1261
+ delta: DeltaMessage
1262
+ logprobs: Optional[ChatCompletionLogProbs] = None
1263
+ finish_reason: Optional[str] = None
1264
+ stop_reason: Optional[Union[int, str]] = None
1265
+
1266
+
1267
+ class ChatCompletionStreamResponse(OpenAIBaseModel):
1268
+ id: str = Field(default_factory=lambda: f"chatcmpl-{random_uuid()}")
1269
+ object: Literal["chat.completion.chunk"] = "chat.completion.chunk"
1270
+ created: int = Field(default_factory=lambda: int(time.time()))
1271
+ model: str
1272
+ choices: List[ChatCompletionResponseStreamChoice]
1273
+ usage: Optional[UsageInfo] = Field(default=None)
1274
+
1275
+
1276
+ class BatchRequestInput(OpenAIBaseModel):
1277
+ """
1278
+ The per-line object of the batch input file.
1279
+
1280
+ NOTE: Currently only the `/v1/chat/completions` endpoint is supported.
1281
+ """
1282
+
1283
+ # A developer-provided per-request id that will be used to match outputs to
1284
+ # inputs. Must be unique for each request in a batch.
1285
+ custom_id: str
1286
+
1287
+ # The HTTP method to be used for the request. Currently only POST is
1288
+ # supported.
1289
+ method: str
1290
+
1291
+ # The OpenAI API relative URL to be used for the request. Currently
1292
+ # /v1/chat/completions is supported.
1293
+ url: str
1294
+
1295
+ # The parameters of the request.
1296
+ body: Union[ChatCompletionRequest, EmbeddingRequest, ScoreRequest]
1297
+
1298
+ @field_validator('body', mode='plain')
1299
+ @classmethod
1300
+ def check_type_for_url(cls, value: Any, info: ValidationInfo):
1301
+ # Use url to disambiguate models
1302
+ url = info.data['url']
1303
+ if url == "/v1/chat/completions":
1304
+ return ChatCompletionRequest.model_validate(value)
1305
+ if url == "/v1/embeddings":
1306
+ return TypeAdapter(EmbeddingRequest).validate_python(value)
1307
+ if url == "/v1/score":
1308
+ return ScoreRequest.model_validate(value)
1309
+ return TypeAdapter(Union[ChatCompletionRequest, EmbeddingRequest,
1310
+ ScoreRequest]).validate_python(value)
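
Each line of a batch input file is one `BatchRequestInput`; the `url` field decides which request model the `body` is validated against. A sketch of a single JSON Lines entry targeting `/v1/chat/completions`, with placeholder model name and content:

# Sketch: one line of the batch input file (JSON Lines format).
import json

line = json.dumps({
    "custom_id": "request-1",   # must be unique within the batch
    "method": "POST",
    "url": "/v1/chat/completions",
    "body": {
        "model": "my-model",    # placeholder model name
        "messages": [{"role": "user", "content": "Hello!"}],
    },
})
print(line)
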
1311
+
1312
+
1313
+ class BatchResponseData(OpenAIBaseModel):
1314
+ # HTTP status code of the response.
1315
+ status_code: int = 200
1316
+
1317
+ # A unique identifier for the API request.

1318
+ request_id: str
1319
+
1320
+ # The body of the response.
1321
+ body: Optional[Union[ChatCompletionResponse, EmbeddingResponse,
1322
+ ScoreResponse]] = None
1323
+
1324
+
1325
+ class BatchRequestOutput(OpenAIBaseModel):
1326
+ """
1327
+ The per-line object of the batch output and error files
1328
+ """
1329
+
1330
+ id: str
1331
+
1332
+ # A developer-provided per-request id that will be used to match outputs to
1333
+ # inputs.
1334
+ custom_id: str
1335
+
1336
+ response: Optional[BatchResponseData]
1337
+
1338
+ # For requests that failed with a non-HTTP error, this will contain more
1339
+ # information on the cause of the failure.
1340
+ error: Optional[Any]
1341
+
1342
+
1343
+ class TokenizeCompletionRequest(OpenAIBaseModel):
1344
+ model: str
1345
+ prompt: str
1346
+
1347
+ add_special_tokens: bool = Field(
1348
+ default=True,
1349
+ description=(
1350
+ "If true (the default), special tokens (e.g. BOS) will be added to "
1351
+ "the prompt."),
1352
+ )
1353
+
1354
+
1355
+ class TokenizeChatRequest(OpenAIBaseModel):
1356
+ model: str
1357
+ messages: List[ChatCompletionMessageParam]
1358
+
1359
+ add_generation_prompt: bool = Field(
1360
+ default=True,
1361
+ description=
1362
+ ("If true, the generation prompt will be added to the chat template. "
1363
+ "This is a parameter used by chat template in tokenizer config of the "
1364
+ "model."),
1365
+ )
1366
+ continue_final_message: bool = Field(
1367
+ default=False,
1368
+ description=
1369
+ ("If this is set, the chat will be formatted so that the final "
1370
+ "message in the chat is open-ended, without any EOS tokens. The "
1371
+ "model will continue this message rather than starting a new one. "
1372
+ "This allows you to \"prefill\" part of the model's response for it. "
1373
+ "Cannot be used at the same time as `add_generation_prompt`."),
1374
+ )
1375
+ add_special_tokens: bool = Field(
1376
+ default=False,
1377
+ description=(
1378
+ "If true, special tokens (e.g. BOS) will be added to the prompt "
1379
+ "on top of what is added by the chat template. "
1380
+ "For most models, the chat template takes care of adding the "
1381
+ "special tokens so this should be set to false (as is the "
1382
+ "default)."),
1383
+ )
1384
+ chat_template: Optional[str] = Field(
1385
+ default=None,
1386
+ description=(
1387
+ "A Jinja template to use for this conversion. "
1388
+ "As of transformers v4.44, default chat template is no longer "
1389
+ "allowed, so you must provide a chat template if the tokenizer "
1390
+ "does not define one."),
1391
+ )
1392
+ chat_template_kwargs: Optional[Dict[str, Any]] = Field(
1393
+ default=None,
1394
+ description=("Additional kwargs to pass to the template renderer. "
1395
+ "Will be accessible by the chat template."),
1396
+ )
1397
+
1398
+ @model_validator(mode="before")
1399
+ @classmethod
1400
+ def check_generation_prompt(cls, data):
1401
+ if data.get("continue_final_message") and data.get(
1402
+ "add_generation_prompt"):
1403
+ raise ValueError("Cannot set both `continue_final_message` and "
1404
+ "`add_generation_prompt` to True.")
1405
+ return data
1406
+
1407
+
1408
+ TokenizeRequest = Union[TokenizeCompletionRequest, TokenizeChatRequest]
1409
+
1410
+
1411
+ class TokenizeResponse(OpenAIBaseModel):
1412
+ count: int
1413
+ max_model_len: int
1414
+ tokens: List[int]
1415
+
1416
+
1417
+ class DetokenizeRequest(OpenAIBaseModel):
1418
+ model: str
1419
+ tokens: List[int]
1420
+
1421
+
1422
+ class DetokenizeResponse(OpenAIBaseModel):
1423
+ prompt: str
1424
+
1425
+
1426
+ class LoadLoraAdapterRequest(BaseModel):
1427
+ lora_name: str
1428
+ lora_path: str
1429
+
1430
+
1431
+ class UnloadLoraAdapterRequest(BaseModel):
1432
+ lora_name: str
1433
+ lora_int_id: Optional[int] = Field(default=None)
1434
+
1435
+
1436
+ ## Protocols for Audio
1437
+ AudioResponseFormat: TypeAlias = Literal["json", "text", "srt", "verbose_json",
1438
+ "vtt"]
1439
+
1440
+
1441
+ class TranscriptionRequest(OpenAIBaseModel):
1442
+ # Ordered by official OpenAI API documentation
1443
+ #https://platform.openai.com/docs/api-reference/audio/createTranscription
1444
+
1445
+ file: UploadFile
1446
+ """
1447
+ The audio file object (not file name) to transcribe, in one of these
1448
+ formats: flac, mp3, mp4, mpeg, mpga, m4a, ogg, wav, or webm.
1449
+ """
1450
+
1451
+ model: str
1452
+ """ID of the model to use.
1453
+ """
1454
+
1455
+ language: Optional[str] = None
1456
+ """The language of the input audio.
1457
+
1458
+ Supplying the input language in
1459
+ [ISO-639-1](https://en.wikipedia.org/wiki/List_of_ISO_639-1_codes) format
1460
+ will improve accuracy and latency.
1461
+ """
1462
+
1463
+ prompt: str = Field(default="")
1464
+ """An optional text to guide the model's style or continue a previous audio
1465
+ segment.
1466
+
1467
+ The [prompt](https://platform.openai.com/docs/guides/speech-to-text#prompting)
1468
+ should match the audio language.
1469
+ """
1470
+
1471
+ response_format: AudioResponseFormat = Field(default="json")
1472
+ """
1473
+ The format of the output, in one of these options: `json`, `text`, `srt`,
1474
+ `verbose_json`, or `vtt`.
1475
+ """
1476
+
1477
+ ## TODO (varun) : Support if set to 0, certain thresholds are met !!
1478
+ temperature: float = Field(default=0.0)
1479
+ """The sampling temperature, between 0 and 1.
1480
+
1481
+ Higher values like 0.8 will make the output more random, while lower values
1482
+ like 0.2 will make it more focused / deterministic. If set to 0, the model
1483
+ will use [log probability](https://en.wikipedia.org/wiki/Log_probability)
1484
+ to automatically increase the temperature until certain thresholds are hit.
1485
+ """
1486
+
1487
+ timestamp_granularities: List[Literal["word", "segment"]] = Field(
1488
+ alias="timestamp_granularities[]", default=[])
1489
+ """The timestamp granularities to populate for this transcription.
1490
+
1491
+ `response_format` must be set `verbose_json` to use timestamp granularities.
1492
+ Either or both of these options are supported: `word`, or `segment`. Note:
1493
+ There is no additional latency for segment timestamps, but generating word
1494
+ timestamps incurs additional latency.
1495
+ """
1496
+
1497
+ # Default sampling parameters for transcription requests.
1498
+ _DEFAULT_SAMPLING_PARAMS: dict = {
1499
+ "temperature": 0,
1500
+ }
1501
+
1502
+ def to_sampling_params(
1503
+ self,
1504
+ default_max_tokens: int,
1505
+ default_sampling_params: Optional[dict] = None) -> SamplingParams:
1506
+ # TODO(#9845): remove max_tokens when field is removed from OpenAI API
1507
+ max_tokens = default_max_tokens
1508
+
1509
+ if default_sampling_params is None:
1510
+ default_sampling_params = {}
1511
+ # Default parameters
1512
+ if (temperature := self.temperature) is None:
1513
+ temperature = default_sampling_params.get(
1514
+ "temperature", self._DEFAULT_SAMPLING_PARAMS["temperature"])
1515
+
1516
+ return SamplingParams.from_optional(temperature=temperature,
1517
+ max_tokens=max_tokens)
1518
+
1519
+
1520
+ # Transcription response objects
1521
+ class TranscriptionResponse(OpenAIBaseModel):
1522
+ text: str
1523
+ """The transcribed text."""
1524
+
1525
+
1526
+ class TranscriptionWord(OpenAIBaseModel):
1527
+ end: float
1528
+ """End time of the word in seconds."""
1529
+
1530
+ start: float
1531
+ """Start time of the word in seconds."""
1532
+
1533
+ word: str
1534
+ """The text content of the word."""
1535
+
1536
+
1537
+ class TranscriptionSegment(OpenAIBaseModel):
1538
+ id: int
1539
+ """Unique identifier of the segment."""
1540
+
1541
+ avg_logprob: float
1542
+ """Average logprob of the segment.
1543
+
1544
+ If the value is lower than -1, consider the logprobs failed.
1545
+ """
1546
+
1547
+ compression_ratio: float
1548
+ """Compression ratio of the segment.
1549
+
1550
+ If the value is greater than 2.4, consider the compression failed.
1551
+ """
1552
+
1553
+ end: float
1554
+ """End time of the segment in seconds."""
1555
+
1556
+ no_speech_prob: float
1557
+ """Probability of no speech in the segment.
1558
+
1559
+ If the value is higher than 1.0 and the `avg_logprob` is below -1, consider
1560
+ this segment silent.
1561
+ """
1562
+
1563
+ seek: int
1564
+ """Seek offset of the segment."""
1565
+
1566
+ start: float
1567
+ """Start time of the segment in seconds."""
1568
+
1569
+ temperature: float
1570
+ """Temperature parameter used for generating the segment."""
1571
+
1572
+ text: str
1573
+ """Text content of the segment."""
1574
+
1575
+ tokens: List[int]
1576
+ """Array of token IDs for the text content."""
1577
+
1578
+
1579
+ class TranscriptionResponseVerbose(OpenAIBaseModel):
1580
+ duration: str
1581
+ """The duration of the input audio."""
1582
+
1583
+ language: str
1584
+ """The language of the input audio."""
1585
+
1586
+ text: str
1587
+ """The transcribed text."""
1588
+
1589
+ segments: Optional[List[TranscriptionSegment]] = None
1590
+ """Segments of the transcribed text and their corresponding details."""
1591
+
1592
+ words: Optional[List[TranscriptionWord]] = None
1593
+ """Extracted words and their corresponding timestamps."""
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/reasoning_parsers/__init__.py ADDED
@@ -0,0 +1,8 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ from .abs_reasoning_parsers import ReasoningParser, ReasoningParserManager
4
+ from .deepseek_r1_reasoning_parser import DeepSeekR1ReasoningParser
5
+
6
+ __all__ = [
7
+ "ReasoningParser", "ReasoningParserManager", "DeepSeekR1ReasoningParser"
8
+ ]
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/reasoning_parsers/__pycache__/__init__.cpython-310.pyc ADDED
Binary file (407 Bytes).
 
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/reasoning_parsers/__pycache__/abs_reasoning_parsers.cpython-310.pyc ADDED
Binary file (6.02 kB).
 
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/reasoning_parsers/__pycache__/deepseek_r1_reasoning_parser.cpython-310.pyc ADDED
Binary file (3.64 kB).
 
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/reasoning_parsers/abs_reasoning_parsers.py ADDED
@@ -0,0 +1,160 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ import os
4
+ from functools import cached_property
5
+ from typing import Callable, Dict, List, Optional, Sequence, Tuple, Type, Union
6
+
7
+ from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,
8
+ DeltaMessage)
9
+ from vllm.logger import init_logger
10
+ from vllm.transformers_utils.tokenizer import AnyTokenizer
11
+ from vllm.utils import import_from_path, is_list_of
12
+
13
+ logger = init_logger(__name__)
14
+
15
+
16
+ class ReasoningParser:
17
+ """
18
+ Abstract reasoning parser class that should not be used directly.
19
+ Provided methods should be used in derived classes.
20
+
21
+ It is used to extract reasoning content from the model output.
22
+ """
23
+
24
+ def __init__(self, tokenizer: AnyTokenizer):
25
+ self.model_tokenizer = tokenizer
26
+
27
+ @cached_property
28
+ def vocab(self) -> Dict[str, int]:
29
+ # NOTE: Only PreTrainedTokenizerFast is guaranteed to have .vocab
30
+ # whereas all tokenizers have .get_vocab()
31
+ return self.model_tokenizer.get_vocab()
32
+
33
+ def extract_reasoning_content(
34
+ self, model_output: str, request: ChatCompletionRequest
35
+ ) -> Tuple[Optional[str], Optional[str]]:
36
+ """
37
+ Extract reasoning content from a complete model-generated string.
38
+
39
+ Used for non-streaming responses where we have the entire model response
40
+ available before sending to the client.
41
+
42
+ Parameters:
43
+ model_output: str
44
+ The model-generated string to extract reasoning content from.
45
+
46
+ request: ChatCompletionRequest
47
+ The request object that was used to generate the model_output.
48
+
49
+ Returns:
50
+ Tuple[Optional[str], Optional[str]]
51
+ A tuple containing the reasoning content and the content.
52
+ """
53
+
54
+ raise NotImplementedError(
55
+ "AbstractReasoningParser.extract_reasoning_calls "
56
+ "has not been implemented!")
57
+
58
+ def extract_reasoning_content_streaming(
59
+ self,
60
+ previous_text: str,
61
+ current_text: str,
62
+ delta_text: str,
63
+ previous_token_ids: Sequence[int],
64
+ current_token_ids: Sequence[int],
65
+ delta_token_ids: Sequence[int],
66
+ ) -> Union[DeltaMessage, None]:
67
+ """
68
+ Instance method that should be implemented for extracting reasoning
69
+ from an incomplete response; for use when handling reasoning calls and
70
+ streaming. Has to be an instance method because it requires state -
71
+ the current tokens/diffs, but also the information about what has
72
+ previously been parsed and extracted (see constructor)
73
+ """
74
+ raise NotImplementedError(
75
+ "AbstractReasoningParser.extract_reasoning_content_streaming "
76
+ "has not been implemented!")
77
+
78
+
79
+ class ReasoningParserManager:
80
+ reasoning_parsers: Dict[str, Type] = {}
81
+
82
+ @classmethod
83
+ def get_reasoning_parser(cls, name) -> Type:
84
+ """
85
+ Get reasoning parser by name which is registered by `register_module`.
86
+
87
+ Raise a KeyError exception if the name is not registered.
88
+ """
89
+ if name in cls.reasoning_parsers:
90
+ return cls.reasoning_parsers[name]
91
+
92
+ raise KeyError(f"reasoning helper: '{name}' not found in "
93
+ "reasoning_parsers")
94
+
95
+ @classmethod
96
+ def _register_module(cls,
97
+ module: Type,
98
+ module_name: Optional[Union[str, List[str]]] = None,
99
+ force: bool = True) -> None:
100
+ if not issubclass(module, ReasoningParser):
101
+ raise TypeError("module must be subclass of ReasoningParser, "
102
+ f"but got {type(module)}")
103
+ if module_name is None:
104
+ module_name = module.__name__
105
+ if isinstance(module_name, str):
106
+ module_name = [module_name]
107
+ for name in module_name:
108
+ if not force and name in cls.reasoning_parsers:
109
+ existed_module = cls.reasoning_parsers[name]
110
+ raise KeyError(f"{name} is already registered "
111
+ f"at {existed_module.__module__}")
112
+ cls.reasoning_parsers[name] = module
113
+
114
+ @classmethod
115
+ def register_module(
116
+ cls,
117
+ name: Optional[Union[str, List[str]]] = None,
118
+ force: bool = True,
119
+ module: Union[Type, None] = None) -> Union[type, Callable]:
120
+ """
121
+ Register module with the given name or name list. It can be used as a
122
+ decorator (with module as None) or as a normal function (with module not
123
+ None).
124
+ """
125
+ if not isinstance(force, bool):
126
+ raise TypeError(f"force must be a boolean, but got {type(force)}")
127
+
128
+ # raise the error ahead of time
129
+ if not (name is None or isinstance(name, str)
130
+ or is_list_of(name, str)):
131
+ raise TypeError(
132
+ "name must be None, an instance of str, or a sequence of str, "
133
+ f"but got {type(name)}")
134
+
135
+ # use it as a normal method: x.register_module(module=SomeClass)
136
+ if module is not None:
137
+ cls._register_module(module=module, module_name=name, force=force)
138
+ return module
139
+
140
+ # use it as a decorator: @x.register_module()
141
+ def _register(module):
142
+ cls._register_module(module=module, module_name=name, force=force)
143
+ return module
144
+
145
+ return _register
146
+
147
+ @classmethod
148
+ def import_reasoning_parser(cls, plugin_path: str) -> None:
149
+ """
150
+ Import a user-defined reasoning parser by the path
151
+ of the reasoning parser define file.
152
+ """
153
+ module_name = os.path.splitext(os.path.basename(plugin_path))[0]
154
+
155
+ try:
156
+ import_from_path(module_name, plugin_path)
157
+ except Exception:
158
+ logger.exception("Failed to load module '%s' from %s.",
159
+ module_name, plugin_path)
160
+ return
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/reasoning_parsers/deepseek_r1_reasoning_parser.py ADDED
@@ -0,0 +1,147 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ import re
4
+ from typing import Optional, Sequence, Tuple, Union
5
+
6
+ from transformers import PreTrainedTokenizerBase
7
+
8
+ from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,
9
+ DeltaMessage)
10
+ from vllm.entrypoints.openai.reasoning_parsers.abs_reasoning_parsers import (
11
+ ReasoningParser, ReasoningParserManager)
12
+ from vllm.logger import init_logger
13
+
14
+ logger = init_logger(__name__)
15
+
16
+
17
+ @ReasoningParserManager.register_module("deepseek_r1")
18
+ class DeepSeekR1ReasoningParser(ReasoningParser):
19
+ """
20
+ Reasoning parser for DeepSeek R1 model.
21
+
22
+ The DeepSeek R1 model uses <think>...</think> tokens to denote reasoning
23
+ text. This parser extracts the reasoning content from the model output.
24
+ """
25
+
26
+ def __init__(self, tokenizer: PreTrainedTokenizerBase):
27
+ super().__init__(tokenizer)
28
+ self.think_start_token = "<think>"
29
+ self.think_end_token = "</think>"
30
+
31
+ self.reasoning_regex = re.compile(
32
+ rf"{self.think_start_token}(.*?){self.think_end_token}", re.DOTALL)
33
+
34
+ if not self.model_tokenizer:
35
+ raise ValueError(
36
+ "The model tokenizer must be passed to the ReasoningParser "
37
+ "constructor during construction.")
38
+
39
+ self.think_start_token_id = self.vocab.get(self.think_start_token)
40
+ self.think_end_token_id = self.vocab.get(self.think_end_token)
41
+ if (self.think_start_token_id is None
42
+ or self.think_end_token_id is None):
43
+ raise RuntimeError(
44
+ "DeepSeek R1 reasoning parser could not locate think start/end "
45
+ "tokens in the tokenizer!")
46
+
47
+ def extract_reasoning_content_streaming(
48
+ self,
49
+ previous_text: str,
50
+ current_text: str,
51
+ delta_text: str,
52
+ previous_token_ids: Sequence[int],
53
+ current_token_ids: Sequence[int],
54
+ delta_token_ids: Sequence[int],
55
+ ) -> Union[DeltaMessage, None]:
56
+ """
57
+ Extract reasoning content from a delta message.
58
+ Handles streaming output where previous + delta = current.
59
+ Uses token IDs for faster processing.
60
+ For text <think>abc</think>xyz:
61
+ - 'abc' goes to reasoning_content
62
+ - 'xyz' goes to content
63
+ """
64
+ # Skip single special tokens
65
+ if len(delta_token_ids) == 1 and (delta_token_ids[0] in [
66
+ self.think_start_token_id, self.think_end_token_id
67
+ ]):
68
+ return None
69
+
70
+ # Check if <think> is present in previous or delta.
71
+ # Keep compatibility with models that don't generate <think> tokens.
72
+ if self.think_start_token_id in previous_token_ids:
73
+ if self.think_end_token_id in delta_token_ids:
74
+ # <think> in previous, </think> in delta,
75
+ # extract reasoning content
76
+ end_index = delta_text.find(self.think_end_token)
77
+ reasoning_content = delta_text[:end_index]
78
+ content = delta_text[end_index + len(self.think_end_token):]
79
+ return DeltaMessage(reasoning_content=reasoning_content,
80
+ content=content if content else None)
81
+ elif self.think_end_token_id in previous_token_ids:
82
+ # <think> in previous, </think> in previous,
83
+ # reasoning has already ended; the delta is response content
84
+ return DeltaMessage(content=delta_text)
85
+ else:
86
+ # <think> in previous, no </think> in previous or delta,
87
+ # reasoning content continues
88
+ return DeltaMessage(reasoning_content=delta_text)
89
+ elif self.think_start_token_id in delta_token_ids:
90
+ if self.think_end_token_id in delta_token_ids:
91
+ # <think> in delta, </think> in delta, extract reasoning content
92
+ start_index = delta_text.find(self.think_start_token)
93
+ end_index = delta_text.find(self.think_end_token)
94
+ reasoning_content = delta_text[start_index +
95
+ len(self.think_start_token
96
+ ):end_index]
97
+ content = delta_text[end_index + len(self.think_end_token):]
98
+ return DeltaMessage(reasoning_content=reasoning_content,
99
+ content=content if content else None)
100
+ else:
101
+ # <think> in delta, no </think> in delta,
102
+ # reasoning content continues
103
+ return DeltaMessage(reasoning_content=delta_text)
104
+ else:
105
+ # No <think> in previous or delta, also need to check for </think>.
106
+ # Because the model may have generated </think> without <think>
107
+ # Ref https://huggingface.co/deepseek-ai/DeepSeek-R1/commit/8a58a132790c9935686eb97f042afa8013451c9f
108
+ if self.think_end_token_id in delta_token_ids:
109
+ # </think> in delta with more tokens,
110
+ # extract reasoning content and content
111
+ end_index = delta_text.find(self.think_end_token)
112
+ reasoning_content = delta_text[:end_index]
113
+ content = delta_text[end_index + len(self.think_end_token):]
114
+ return DeltaMessage(reasoning_content=reasoning_content,
115
+ content=content if content else None)
116
+ elif self.think_end_token_id in previous_token_ids:
117
+ # </think> in previous, thinking content ends
118
+ return DeltaMessage(content=delta_text)
119
+ else:
120
+ # no </think> in previous or delta, reasoning content continues
121
+ return DeltaMessage(reasoning_content=delta_text)
122
+
123
+ def extract_reasoning_content(
124
+ self, model_output: str, request: ChatCompletionRequest
125
+ ) -> Tuple[Optional[str], Optional[str]]:
126
+
127
+ # DeepSeek R1 doesn't generate <think> now.
128
+ # Thus we assume the reasoning content is always at the start.
129
+ # Ref https://huggingface.co/deepseek-ai/DeepSeek-R1/commit/8a58a132790c9935686eb97f042afa8013451c9f
130
+ if self.think_end_token not in model_output:
131
+ return model_output, None
132
+ else:
133
+ # Add a start token if it's missing to keep compatibility.
134
+ if self.think_start_token not in model_output:
135
+ model_output = f"{self.think_start_token}{model_output}"
136
+ # Use a regex to find the reasoning content
137
+ reasoning_content = self.reasoning_regex.findall(model_output)[0]
138
+
139
+ end_index = len(
140
+ f"{self.think_start_token}{reasoning_content}{self.think_end_token}"
141
+ )
142
+ final_output = model_output[end_index:]
143
+
144
+ if len(final_output) == 0:
145
+ return reasoning_content, None
146
+
147
+ return reasoning_content, final_output
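
A small sketch of the split that `extract_reasoning_content` performs, using plain strings (constructing the parser itself requires a tokenizer whose vocabulary contains the `<think>`/`</think>` tokens); the sample text is illustrative:

# Sketch of the non-streaming split, mirroring the logic above with plain strings.
import re

think_start, think_end = "<think>", "</think>"
reasoning_regex = re.compile(rf"{think_start}(.*?){think_end}", re.DOTALL)

model_output = "The user asks about 2 + 2.</think>The answer is 4."
if think_start not in model_output:   # R1 may omit the opening tag
    model_output = f"{think_start}{model_output}"

reasoning = reasoning_regex.findall(model_output)[0]
content = model_output[len(f"{think_start}{reasoning}{think_end}"):]

assert reasoning == "The user asks about 2 + 2."
assert content == "The answer is 4."
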
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/run_batch.py ADDED
@@ -0,0 +1,435 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ import asyncio
4
+ import tempfile
5
+ from http import HTTPStatus
6
+ from io import StringIO
7
+ from typing import Awaitable, Callable, List, Optional
8
+
9
+ import aiohttp
10
+ import torch
11
+ from prometheus_client import start_http_server
12
+ from tqdm import tqdm
13
+
14
+ from vllm.engine.arg_utils import AsyncEngineArgs, nullable_str
15
+ from vllm.engine.async_llm_engine import AsyncLLMEngine
16
+ from vllm.entrypoints.logger import RequestLogger, logger
17
+ # yapf: disable
18
+ from vllm.entrypoints.openai.protocol import (BatchRequestInput,
19
+ BatchRequestOutput,
20
+ BatchResponseData,
21
+ ChatCompletionResponse,
22
+ EmbeddingResponse, ErrorResponse,
23
+ ScoreResponse)
24
+ # yapf: enable
25
+ from vllm.entrypoints.openai.serving_chat import OpenAIServingChat
26
+ from vllm.entrypoints.openai.serving_embedding import OpenAIServingEmbedding
27
+ from vllm.entrypoints.openai.serving_models import (BaseModelPath,
28
+ OpenAIServingModels)
29
+ from vllm.entrypoints.openai.serving_score import OpenAIServingScores
30
+ from vllm.usage.usage_lib import UsageContext
31
+ from vllm.utils import FlexibleArgumentParser, random_uuid
32
+ from vllm.version import __version__ as VLLM_VERSION
33
+
34
+
35
+ def parse_args():
36
+ parser = FlexibleArgumentParser(
37
+ description="vLLM OpenAI-Compatible batch runner.")
38
+ parser.add_argument(
39
+ "-i",
40
+ "--input-file",
41
+ required=True,
42
+ type=str,
43
+ help=
44
+ "The path or url to a single input file. Currently supports local file "
45
+ "paths, or the http protocol (http or https). If a URL is specified, "
46
+ "the file should be available via HTTP GET.")
47
+ parser.add_argument(
48
+ "-o",
49
+ "--output-file",
50
+ required=True,
51
+ type=str,
52
+ help="The path or url to a single output file. Currently supports "
53
+ "local file paths, or web (http or https) urls. If a URL is specified,"
54
+ " the file should be available via HTTP PUT.")
55
+ parser.add_argument(
56
+ "--output-tmp-dir",
57
+ type=str,
58
+ default=None,
59
+ help="The directory to store the output file before uploading it "
60
+ "to the output URL.",
61
+ )
62
+ parser.add_argument("--response-role",
63
+ type=nullable_str,
64
+ default="assistant",
65
+ help="The role name to return if "
66
+ "`request.add_generation_prompt=True`.")
67
+
68
+ parser = AsyncEngineArgs.add_cli_args(parser)
69
+
70
+ parser.add_argument('--max-log-len',
71
+ type=int,
72
+ default=None,
73
+ help='Max number of prompt characters or prompt '
74
+ 'ID numbers being printed in log.'
75
+ '\n\nDefault: Unlimited')
76
+
77
+ parser.add_argument("--enable-metrics",
78
+ action="store_true",
79
+ help="Enable Prometheus metrics")
80
+ parser.add_argument(
81
+ "--url",
82
+ type=str,
83
+ default="0.0.0.0",
84
+ help="URL to the Prometheus metrics server "
85
+ "(only needed if enable-metrics is set).",
86
+ )
87
+ parser.add_argument(
88
+ "--port",
89
+ type=int,
90
+ default=8000,
91
+ help="Port number for the Prometheus metrics server "
92
+ "(only needed if enable-metrics is set).",
93
+ )
94
+ parser.add_argument(
95
+ "--enable-prompt-tokens-details",
96
+ action='store_true',
97
+ default=False,
98
+ help="If set to True, enable prompt_tokens_details in usage.")
99
+
100
+ return parser.parse_args()
101
+
102
+
103
+ # explicitly use pure text format, with a newline at the end
104
+ # this makes it impossible to see the animation in the progress bar
105
+ # but will avoid messing up with ray or multiprocessing, which wraps
106
+ # each line of output with some prefix.
107
+ _BAR_FORMAT = "{desc}: {percentage:3.0f}% Completed | {n_fmt}/{total_fmt} [{elapsed}<{remaining}, {rate_fmt}]\n" # noqa: E501
108
+
109
+
110
+ class BatchProgressTracker:
111
+
112
+ def __init__(self):
113
+ self._total = 0
114
+ self._pbar: Optional[tqdm] = None
115
+
116
+ def submitted(self):
117
+ self._total += 1
118
+
119
+ def completed(self):
120
+ if self._pbar:
121
+ self._pbar.update()
122
+
123
+ def pbar(self) -> tqdm:
124
+ enable_tqdm = not torch.distributed.is_initialized(
125
+ ) or torch.distributed.get_rank() == 0
126
+ self._pbar = tqdm(total=self._total,
127
+ unit="req",
128
+ desc="Running batch",
129
+ mininterval=5,
130
+ disable=not enable_tqdm,
131
+ bar_format=_BAR_FORMAT)
132
+ return self._pbar
133
+
134
+
135
+ async def read_file(path_or_url: str) -> str:
136
+ if path_or_url.startswith("http://") or path_or_url.startswith("https://"):
137
+ async with aiohttp.ClientSession() as session, \
138
+ session.get(path_or_url) as resp:
139
+ return await resp.text()
140
+ else:
141
+ with open(path_or_url, encoding="utf-8") as f:
142
+ return f.read()
143
+
144
+
145
+ async def write_local_file(output_path: str,
146
+ batch_outputs: List[BatchRequestOutput]) -> None:
147
+ """
148
+ Write the responses to a local file.
149
+ output_path: The path to write the responses to.
150
+ batch_outputs: The list of batch outputs to write.
151
+ """
152
+ # We should make this async, but as long as run_batch runs as a
153
+ # standalone program, blocking the event loop won't affect performance.
154
+ with open(output_path, "w", encoding="utf-8") as f:
155
+ for o in batch_outputs:
156
+ print(o.model_dump_json(), file=f)
157
+
158
+
159
+ async def upload_data(output_url: str, data_or_file: str,
160
+ from_file: bool) -> None:
161
+ """
162
+ Upload a local file to a URL.
163
+ output_url: The URL to upload the file to.
164
+ data_or_file: Either the data to upload or the path to the file to upload.
165
+ from_file: If True, data_or_file is the path to the file to upload.
166
+ """
167
+ # Timeout is a common issue when uploading large files.
168
+ # We retry max_retries times before giving up.
169
+ max_retries = 5
170
+ # Number of seconds to wait before retrying.
171
+ delay = 5
172
+
173
+ for attempt in range(1, max_retries + 1):
174
+ try:
175
+ # We increase the timeout to 1000 seconds to allow
176
+ # for large files (default is 300).
177
+ async with aiohttp.ClientSession(timeout=aiohttp.ClientTimeout(
178
+ total=1000)) as session:
179
+ if from_file:
180
+ with open(data_or_file, "rb") as file:
181
+ async with session.put(output_url,
182
+ data=file) as response:
183
+ if response.status != 200:
184
+ raise Exception(f"Failed to upload file.\n"
185
+ f"Status: {response.status}\n"
186
+ f"Response: {response.text()}")
187
+ else:
188
+ async with session.put(output_url,
189
+ data=data_or_file) as response:
190
+ if response.status != 200:
191
+ raise Exception(f"Failed to upload data.\n"
192
+ f"Status: {response.status}\n"
193
+ f"Response: {response.text()}")
194
+
195
+ except Exception as e:
196
+ if attempt < max_retries:
197
+ logger.error(
198
+ f"Failed to upload data (attempt {attempt}). "
199
+ f"Error message: {str(e)}.\nRetrying in {delay} seconds..."
200
+ )
201
+ await asyncio.sleep(delay)
202
+ else:
203
+ raise Exception(f"Failed to upload data (attempt {attempt}). "
204
+ f"Error message: {str(e)}.") from e
205
+
206
+
207
+ async def write_file(path_or_url: str, batch_outputs: List[BatchRequestOutput],
208
+ output_tmp_dir: str) -> None:
209
+ """
210
+ Write batch_outputs to a file or upload to a URL.
211
+ path_or_url: The path or URL to write batch_outputs to.
212
+ batch_outputs: The list of batch outputs to write.
213
+ output_tmp_dir: The directory to store the output file before uploading it
214
+ to the output URL.
215
+ """
216
+ if path_or_url.startswith("http://") or path_or_url.startswith("https://"):
217
+ if output_tmp_dir is None:
218
+ logger.info("Writing outputs to memory buffer")
219
+ output_buffer = StringIO()
220
+ for o in batch_outputs:
221
+ print(o.model_dump_json(), file=output_buffer)
222
+ output_buffer.seek(0)
223
+ logger.info("Uploading outputs to %s", path_or_url)
224
+ await upload_data(
225
+ path_or_url,
226
+ output_buffer.read().strip().encode("utf-8"),
227
+ from_file=False,
228
+ )
229
+ else:
230
+ # Write responses to a temporary file and then upload it to the URL.
231
+ with tempfile.NamedTemporaryFile(
232
+ mode="w",
233
+ encoding="utf-8",
234
+ dir=output_tmp_dir,
235
+ prefix="tmp_batch_output_",
236
+ suffix=".jsonl",
237
+ ) as f:
238
+ logger.info("Writing outputs to temporary local file %s",
239
+ f.name)
240
+ await write_local_file(f.name, batch_outputs)
241
+ logger.info("Uploading outputs to %s", path_or_url)
242
+ await upload_data(path_or_url, f.name, from_file=True)
243
+ else:
244
+ logger.info("Writing outputs to local file %s", path_or_url)
245
+ await write_local_file(path_or_url, batch_outputs)
246
+
247
+
248
+ def make_error_request_output(request: BatchRequestInput,
249
+ error_msg: str) -> BatchRequestOutput:
250
+ batch_output = BatchRequestOutput(
251
+ id=f"vllm-{random_uuid()}",
252
+ custom_id=request.custom_id,
253
+ response=BatchResponseData(
254
+ status_code=HTTPStatus.BAD_REQUEST,
255
+ request_id=f"vllm-batch-{random_uuid()}",
256
+ ),
257
+ error=error_msg,
258
+ )
259
+ return batch_output
260
+
261
+
262
+ async def make_async_error_request_output(
263
+ request: BatchRequestInput, error_msg: str) -> BatchRequestOutput:
264
+ return make_error_request_output(request, error_msg)
265
+
266
+
267
+ async def run_request(serving_engine_func: Callable,
268
+ request: BatchRequestInput,
269
+ tracker: BatchProgressTracker) -> BatchRequestOutput:
270
+ response = await serving_engine_func(request.body)
271
+
272
+ if isinstance(response,
273
+ (ChatCompletionResponse, EmbeddingResponse, ScoreResponse)):
274
+ batch_output = BatchRequestOutput(
275
+ id=f"vllm-{random_uuid()}",
276
+ custom_id=request.custom_id,
277
+ response=BatchResponseData(
278
+ body=response, request_id=f"vllm-batch-{random_uuid()}"),
279
+ error=None,
280
+ )
281
+ elif isinstance(response, ErrorResponse):
282
+ batch_output = BatchRequestOutput(
283
+ id=f"vllm-{random_uuid()}",
284
+ custom_id=request.custom_id,
285
+ response=BatchResponseData(
286
+ status_code=response.code,
287
+ request_id=f"vllm-batch-{random_uuid()}"),
288
+ error=response,
289
+ )
290
+ else:
291
+ batch_output = make_error_request_output(
292
+ request, error_msg="Request must not be sent in stream mode")
293
+
294
+ tracker.completed()
295
+ return batch_output
296
+
297
+
298
+ async def main(args):
299
+ if args.served_model_name is not None:
300
+ served_model_names = args.served_model_name
301
+ else:
302
+ served_model_names = [args.model]
303
+
304
+ engine_args = AsyncEngineArgs.from_cli_args(args)
305
+ engine = AsyncLLMEngine.from_engine_args(
306
+ engine_args, usage_context=UsageContext.OPENAI_BATCH_RUNNER)
307
+
308
+ model_config = await engine.get_model_config()
309
+ base_model_paths = [
310
+ BaseModelPath(name=name, model_path=args.model)
311
+ for name in served_model_names
312
+ ]
313
+
314
+ if args.disable_log_requests:
315
+ request_logger = None
316
+ else:
317
+ request_logger = RequestLogger(max_log_len=args.max_log_len)
318
+
319
+ # Create the openai serving objects.
320
+ openai_serving_models = OpenAIServingModels(
321
+ engine_client=engine,
322
+ model_config=model_config,
323
+ base_model_paths=base_model_paths,
324
+ lora_modules=None,
325
+ prompt_adapters=None,
326
+ )
327
+ openai_serving_chat = OpenAIServingChat(
328
+ engine,
329
+ model_config,
330
+ openai_serving_models,
331
+ args.response_role,
332
+ request_logger=request_logger,
333
+ chat_template=None,
334
+ chat_template_content_format="auto",
335
+ enable_prompt_tokens_details=args.enable_prompt_tokens_details,
336
+ ) if model_config.runner_type == "generate" else None
337
+ openai_serving_embedding = OpenAIServingEmbedding(
338
+ engine,
339
+ model_config,
340
+ openai_serving_models,
341
+ request_logger=request_logger,
342
+ chat_template=None,
343
+ chat_template_content_format="auto",
344
+ ) if model_config.task == "embed" else None
345
+ openai_serving_scores = (OpenAIServingScores(
346
+ engine,
347
+ model_config,
348
+ openai_serving_models,
349
+ request_logger=request_logger,
350
+ ) if model_config.task == "score" else None)
351
+
352
+ tracker = BatchProgressTracker()
353
+ logger.info("Reading batch from %s...", args.input_file)
354
+
355
+ # Submit all requests in the file to the engine "concurrently".
356
+ response_futures: List[Awaitable[BatchRequestOutput]] = []
357
+ for request_json in (await read_file(args.input_file)).strip().split("\n"):
358
+ # Skip empty lines.
359
+ request_json = request_json.strip()
360
+ if not request_json:
361
+ continue
362
+
363
+ request = BatchRequestInput.model_validate_json(request_json)
364
+
365
+ # Determine the type of request and run it.
366
+ if request.url == "/v1/chat/completions":
367
+ handler_fn = (None if openai_serving_chat is None else
368
+ openai_serving_chat.create_chat_completion)
369
+ if handler_fn is None:
370
+ response_futures.append(
371
+ make_async_error_request_output(
372
+ request,
373
+ error_msg=
374
+ "The model does not support Chat Completions API",
375
+ ))
376
+ continue
377
+
378
+ response_futures.append(run_request(handler_fn, request, tracker))
379
+ tracker.submitted()
380
+ elif request.url == "/v1/embeddings":
381
+ handler_fn = (None if openai_serving_embedding is None else
382
+ openai_serving_embedding.create_embedding)
383
+ if handler_fn is None:
384
+ response_futures.append(
385
+ make_async_error_request_output(
386
+ request,
387
+ error_msg="The model does not support Embeddings API",
388
+ ))
389
+ continue
390
+
391
+ response_futures.append(run_request(handler_fn, request, tracker))
392
+ tracker.submitted()
393
+ elif request.url == "/v1/score":
394
+ handler_fn = (None if openai_serving_scores is None else
395
+ openai_serving_scores.create_score)
396
+ if handler_fn is None:
397
+ response_futures.append(
398
+ make_async_error_request_output(
399
+ request,
400
+ error_msg="The model does not support Scores API",
401
+ ))
402
+ continue
403
+
404
+ response_futures.append(run_request(handler_fn, request, tracker))
405
+ tracker.submitted()
406
+ else:
407
+ response_futures.append(
408
+ make_async_error_request_output(
409
+ request,
410
+ error_msg=
411
+ "Only /v1/chat/completions, /v1/embeddings, and /v1/score "
412
+ "are supported in the batch endpoint.",
413
+ ))
414
+
415
+ with tracker.pbar():
416
+ responses = await asyncio.gather(*response_futures)
417
+
418
+ await write_file(args.output_file, responses, args.output_tmp_dir)
419
+
420
+
421
+ if __name__ == "__main__":
422
+ args = parse_args()
423
+
424
+ logger.info("vLLM batch processing API version %s", VLLM_VERSION)
425
+ logger.info("args: %s", args)
426
+
427
+ # Start the Prometheus metrics server. LLMEngine uses the Prometheus client
428
+ # to publish metrics at the /metrics endpoint.
429
+ if args.enable_metrics:
430
+ logger.info("Prometheus metrics enabled")
431
+ start_http_server(port=args.port, addr=args.url)
432
+ else:
433
+ logger.info("Prometheus metrics disabled")
434
+
435
+ asyncio.run(main(args))
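The batch runner above expects one JSON request object per input line, each carrying a custom_id, a url that matches one of the endpoints handled in main(), and a body; it writes one BatchRequestOutput JSON object per line to the output path or URL. A minimal usage sketch follows. The file names and model name are placeholders, and the python -m vllm.entrypoints.openai.run_batch invocation with -i/-o/--model reflects the documented entrypoint, so verify the flags against the installed version.

# make_batch_input.py -- build a one-request JSONL input for the batch runner
import json

request = {
    "custom_id": "request-1",
    "method": "POST",
    "url": "/v1/chat/completions",  # must be one of the endpoints handled above
    "body": {
        "model": "meta-llama/Llama-3.1-8B-Instruct",  # placeholder model name
        "messages": [{"role": "user", "content": "Hello!"}],
    },
}

with open("batch_input.jsonl", "w", encoding="utf-8") as f:
    f.write(json.dumps(request) + "\n")

# Then run (assumed CLI invocation, check against your vLLM install):
#   python -m vllm.entrypoints.openai.run_batch \
#       -i batch_input.jsonl -o batch_output.jsonl \
#       --model meta-llama/Llama-3.1-8B-Instruct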
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_chat.py ADDED
@@ -0,0 +1,961 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ import asyncio
4
+ import json
5
+ import time
6
+ from typing import (AsyncGenerator, AsyncIterator, Callable, Dict, Final, List,
7
+ Optional)
8
+ from typing import Sequence as GenericSequence
9
+ from typing import Union
10
+
11
+ from fastapi import Request
12
+
13
+ from vllm.config import ModelConfig
14
+ from vllm.engine.protocol import EngineClient
15
+ from vllm.entrypoints.chat_utils import (ChatTemplateContentFormatOption,
16
+ ConversationMessage)
17
+ from vllm.entrypoints.logger import RequestLogger
18
+ from vllm.entrypoints.openai.protocol import (
19
+ ChatCompletionLogProb, ChatCompletionLogProbs,
20
+ ChatCompletionLogProbsContent, ChatCompletionNamedToolChoiceParam,
21
+ ChatCompletionRequest, ChatCompletionResponse,
22
+ ChatCompletionResponseChoice, ChatCompletionResponseStreamChoice,
23
+ ChatCompletionStreamResponse, ChatMessage, DeltaFunctionCall, DeltaMessage,
24
+ DeltaToolCall, ErrorResponse, FunctionCall, PromptTokenUsageInfo,
25
+ RequestResponseMetadata, ToolCall, UsageInfo)
26
+ from vllm.entrypoints.openai.reasoning_parsers import (ReasoningParser,
27
+ ReasoningParserManager)
28
+ from vllm.entrypoints.openai.serving_engine import OpenAIServing
29
+ from vllm.entrypoints.openai.serving_models import OpenAIServingModels
30
+ from vllm.entrypoints.openai.tool_parsers import ToolParser, ToolParserManager
31
+ from vllm.entrypoints.openai.tool_parsers.mistral_tool_parser import (
32
+ MistralToolCall)
33
+ from vllm.logger import init_logger
34
+ from vllm.outputs import CompletionOutput, RequestOutput
35
+ from vllm.sampling_params import BeamSearchParams, SamplingParams
36
+ from vllm.sequence import Logprob
37
+ from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer
38
+ from vllm.transformers_utils.tokenizers import (maybe_serialize_tool_calls,
39
+ truncate_tool_call_ids)
40
+
41
+ logger = init_logger(__name__)
42
+
43
+
44
+ class OpenAIServingChat(OpenAIServing):
45
+
46
+ def __init__(
47
+ self,
48
+ engine_client: EngineClient,
49
+ model_config: ModelConfig,
50
+ models: OpenAIServingModels,
51
+ response_role: str,
52
+ *,
53
+ request_logger: Optional[RequestLogger],
54
+ chat_template: Optional[str],
55
+ chat_template_content_format: ChatTemplateContentFormatOption,
56
+ return_tokens_as_token_ids: bool = False,
57
+ enable_reasoning: bool = False,
58
+ reasoning_parser: Optional[str] = None,
59
+ enable_auto_tools: bool = False,
60
+ tool_parser: Optional[str] = None,
61
+ enable_prompt_tokens_details: bool = False,
62
+ ) -> None:
63
+ super().__init__(engine_client=engine_client,
64
+ model_config=model_config,
65
+ models=models,
66
+ request_logger=request_logger,
67
+ return_tokens_as_token_ids=return_tokens_as_token_ids)
68
+
69
+ self.response_role = response_role
70
+ self.chat_template = chat_template
71
+ self.chat_template_content_format: Final = chat_template_content_format
72
+
73
+ # set up tool use
74
+ self.enable_auto_tools: bool = enable_auto_tools
75
+ if self.enable_auto_tools:
76
+ logger.info(
77
+ "\"auto\" tool choice has been enabled please note that while"
78
+ " the parallel_tool_calls client option is preset for "
79
+ "compatibility reasons, it will be ignored.")
80
+
81
+ self.enable_reasoning: bool = enable_reasoning
82
+ self.reasoning_parser: Optional[Callable[[AnyTokenizer],
83
+ ReasoningParser]] = None
84
+ if self.enable_reasoning:
85
+ try:
86
+ self.reasoning_parser = (
87
+ ReasoningParserManager.get_reasoning_parser(
88
+ reasoning_parser))
89
+ except Exception as e:
90
+ raise TypeError("Error: --enable-reasoning requires "
91
+ f"reasoning_parser:'{reasoning_parser}' "
92
+ "which has not been registered") from e
93
+ self.tool_parser: Optional[Callable[[AnyTokenizer], ToolParser]] = None
94
+ if self.enable_auto_tools:
95
+ try:
96
+ if (tool_parser == "pythonic" and
97
+ model_config.model.startswith("meta-llama/Llama-3.2")):
98
+ logger.warning(
99
+ "Llama3.2 models may struggle to emit valid pythonic"
100
+ " tool calls")
101
+ self.tool_parser = ToolParserManager.get_tool_parser(
102
+ tool_parser)
103
+ except Exception as e:
104
+ raise TypeError("Error: --enable-auto-tool-choice requires "
105
+ f"tool_parser:'{tool_parser}' which has not "
106
+ "been registered") from e
107
+
108
+ self.enable_prompt_tokens_details = enable_prompt_tokens_details
109
+ diff_sampling_param = self.model_config.get_diff_sampling_param()
110
+ if diff_sampling_param:
111
+ logger.info("Overwriting default chat sampling param with: %s",
112
+ diff_sampling_param)
113
+
114
+ async def create_chat_completion(
115
+ self,
116
+ request: ChatCompletionRequest,
117
+ raw_request: Optional[Request] = None,
118
+ ) -> Union[AsyncGenerator[str, None], ChatCompletionResponse,
119
+ ErrorResponse]:
120
+ """
121
+ Chat Completion API similar to OpenAI's API.
122
+
123
+ See https://platform.openai.com/docs/api-reference/chat/create
124
+ for the API specification. This API mimics the OpenAI
125
+ Chat Completion API.
126
+ """
127
+ error_check_ret = await self._check_model(request)
128
+ if error_check_ret is not None:
129
+ logger.error("Error with model %s", error_check_ret)
130
+ return error_check_ret
131
+
132
+ # If the engine is dead, raise the engine's DEAD_ERROR.
133
+ # This is required for the streaming case, where we return a
134
+ # success status before we actually start generating text :).
135
+ if self.engine_client.errored:
136
+ raise self.engine_client.dead_error
137
+
138
+ try:
139
+ (
140
+ lora_request,
141
+ prompt_adapter_request,
142
+ ) = self._maybe_get_adapters(request)
143
+
144
+ model_name = self.models.model_name(lora_request)
145
+
146
+ tokenizer = await self.engine_client.get_tokenizer(lora_request)
147
+
148
+ tool_parser = self.tool_parser
149
+
150
+ # validation for OpenAI tools
151
+ # tool_choice = "required" is not supported
152
+ if request.tool_choice == "required":
153
+ return self.create_error_response(
154
+ "tool_choice = \"required\" is not supported!")
155
+
156
+ if isinstance(tokenizer, MistralTokenizer):
157
+ # because of issues with pydantic we need to potentially
158
+ # re-serialize the tool_calls field of the request
159
+ # for more info: see comment in `maybe_serialize_tool_calls`
160
+ maybe_serialize_tool_calls(request)
161
+ truncate_tool_call_ids(request)
162
+
163
+ if (request.tool_choice == "auto" and
164
+ not (self.enable_auto_tools and tool_parser is not None)
165
+ and not isinstance(tokenizer, MistralTokenizer)):
166
+ # for hf tokenizers, "auto" tools requires
167
+ # --enable-auto-tool-choice and --tool-call-parser
168
+ return self.create_error_response(
169
+ "\"auto\" tool choice requires "
170
+ "--enable-auto-tool-choice and --tool-call-parser to be set"
171
+ )
172
+
173
+ tool_dicts = None if request.tools is None else [
174
+ tool.model_dump() for tool in request.tools
175
+ ]
176
+
177
+ (
178
+ conversation,
179
+ request_prompts,
180
+ engine_prompts,
181
+ ) = await self._preprocess_chat(
182
+ request,
183
+ tokenizer,
184
+ request.messages,
185
+ chat_template=request.chat_template or self.chat_template,
186
+ chat_template_content_format=self.chat_template_content_format,
187
+ add_generation_prompt=request.add_generation_prompt,
188
+ continue_final_message=request.continue_final_message,
189
+ tool_dicts=tool_dicts,
190
+ documents=request.documents,
191
+ chat_template_kwargs=request.chat_template_kwargs,
192
+ tool_parser=tool_parser,
193
+ truncate_prompt_tokens=request.truncate_prompt_tokens,
194
+ add_special_tokens=request.add_special_tokens,
195
+ )
196
+ except ValueError as e:
197
+ logger.exception("Error in preprocessing prompt inputs")
198
+ return self.create_error_response(str(e))
199
+
200
+ request_id = "chatcmpl-" \
201
+ f"{self._base_request_id(raw_request, request.request_id)}"
202
+
203
+ request_metadata = RequestResponseMetadata(request_id=request_id)
204
+ if raw_request:
205
+ raw_request.state.request_metadata = request_metadata
206
+
207
+ # Schedule the request and get the result generator.
208
+ generators: List[AsyncGenerator[RequestOutput, None]] = []
209
+ try:
210
+ for i, engine_prompt in enumerate(engine_prompts):
211
+ sampling_params: Union[SamplingParams, BeamSearchParams]
212
+ default_max_tokens = self.max_model_len - len(
213
+ engine_prompt["prompt_token_ids"])
214
+ # Build default sampling params
215
+ default_sampling_params = (
216
+ self.model_config.get_diff_sampling_param())
217
+ if request.use_beam_search:
218
+ sampling_params = request.to_beam_search_params(
219
+ default_max_tokens, default_sampling_params)
220
+ else:
221
+ sampling_params = request.to_sampling_params(
222
+ default_max_tokens,
223
+ self.model_config.logits_processor_pattern,
224
+ default_sampling_params)
225
+
226
+ self._log_inputs(request_id,
227
+ request_prompts[i],
228
+ params=sampling_params,
229
+ lora_request=lora_request,
230
+ prompt_adapter_request=prompt_adapter_request)
231
+
232
+ trace_headers = (None if raw_request is None else await
233
+ self._get_trace_headers(raw_request.headers))
234
+
235
+ if isinstance(sampling_params, BeamSearchParams):
236
+ generator = self.engine_client.beam_search(
237
+ prompt=engine_prompt,
238
+ request_id=request_id,
239
+ params=sampling_params,
240
+ )
241
+ else:
242
+ generator = self.engine_client.generate(
243
+ engine_prompt,
244
+ sampling_params,
245
+ request_id,
246
+ lora_request=lora_request,
247
+ trace_headers=trace_headers,
248
+ prompt_adapter_request=prompt_adapter_request,
249
+ priority=request.priority,
250
+ )
251
+
252
+ generators.append(generator)
253
+ except ValueError as e:
254
+ # TODO: Use a vllm-specific Validation Error
255
+ return self.create_error_response(str(e))
256
+
257
+ assert len(generators) == 1
258
+ result_generator, = generators
259
+
260
+ # Streaming response
261
+ if request.stream:
262
+ return self.chat_completion_stream_generator(
263
+ request, result_generator, request_id, model_name,
264
+ conversation, tokenizer, request_metadata)
265
+
266
+ try:
267
+ return await self.chat_completion_full_generator(
268
+ request, result_generator, request_id, model_name,
269
+ conversation, tokenizer, request_metadata)
270
+ except ValueError as e:
271
+ # TODO: Use a vllm-specific Validation Error
272
+ return self.create_error_response(str(e))
273
+
274
+ def get_chat_request_role(self, request: ChatCompletionRequest) -> str:
275
+ if request.add_generation_prompt:
276
+ return self.response_role
277
+ return request.messages[-1]["role"]
278
+
279
+ async def chat_completion_stream_generator(
280
+ self,
281
+ request: ChatCompletionRequest,
282
+ result_generator: AsyncIterator[RequestOutput],
283
+ request_id: str,
284
+ model_name: str,
285
+ conversation: List[ConversationMessage],
286
+ tokenizer: AnyTokenizer,
287
+ request_metadata: RequestResponseMetadata,
288
+ ) -> AsyncGenerator[str, None]:
289
+ created_time = int(time.time())
290
+ chunk_object_type: Final = "chat.completion.chunk"
291
+ first_iteration = True
292
+
293
+ # Send response for each token for each request.n (index)
294
+ num_choices = 1 if request.n is None else request.n
295
+ previous_num_tokens = [0] * num_choices
296
+ finish_reason_sent = [False] * num_choices
297
+ num_prompt_tokens = 0
298
+ num_cached_tokens = None
299
+
300
+ if isinstance(request.tool_choice, ChatCompletionNamedToolChoiceParam):
301
+ tool_choice_function_name = request.tool_choice.function.name
302
+ else:
303
+ tool_choice_function_name = None
304
+
305
+ # Determine whether tools are in use with "auto" tool choice
306
+ tool_choice_auto = (
307
+ not tool_choice_function_name
308
+ and self._should_stream_with_auto_tool_parsing(request))
309
+
310
+ should_stream_with_reasoning_parsing = (
311
+ self._should_stream_with_reasoning_parsing(request))
312
+
313
+ all_previous_token_ids: Optional[List[List[int]]]
314
+
315
+ # Only one of these will be used, thus previous_texts and
316
+ # all_previous_token_ids will not be used twice in the same iteration.
317
+ if tool_choice_auto or should_stream_with_reasoning_parsing:
318
+ # These are only required in "auto" tool choice case
319
+ previous_texts = [""] * num_choices
320
+ all_previous_token_ids = [[]] * num_choices
321
+ else:
322
+ previous_texts, all_previous_token_ids = None, None
323
+
324
+ try:
325
+ # There is no need to check if the reasoning_parser is None
326
+ # because the should_stream_with_reasoning_parsing check
327
+ # already ensures that the reasoning_parser is not None.
328
+ # but the pre-commit hook requires it.
329
+ if should_stream_with_reasoning_parsing and \
330
+ self.reasoning_parser is not None:
331
+ reasoning_parser = self.reasoning_parser(tokenizer)
332
+ except RuntimeError as e:
333
+ logger.exception("Error in reasoning parser creation.")
334
+ data = self.create_streaming_error_response(str(e))
335
+ yield f"data: {data}\n\n"
336
+ yield "data: [DONE]\n\n"
337
+ return
338
+
339
+ # Prepare the tool parser if it's needed
340
+ try:
341
+ if tool_choice_auto and self.tool_parser:
342
+ tool_parsers: List[Optional[ToolParser]] = [
343
+ self.tool_parser(tokenizer)
344
+ ] * num_choices
345
+ else:
346
+ tool_parsers = [None] * num_choices
347
+ except Exception as e:
348
+ logger.exception("Error in tool parser creation.")
349
+ data = self.create_streaming_error_response(str(e))
350
+ yield f"data: {data}\n\n"
351
+ yield "data: [DONE]\n\n"
352
+ return
353
+
354
+ stream_options = request.stream_options
355
+ if stream_options:
356
+ include_usage = stream_options.include_usage
357
+ include_continuous_usage = include_usage and \
358
+ stream_options.continuous_usage_stats
359
+ else:
360
+ include_usage, include_continuous_usage = False, False
361
+
362
+ try:
363
+ async for res in result_generator:
364
+ if res.prompt_token_ids is not None:
365
+ num_prompt_tokens = len(res.prompt_token_ids)
366
+ if res.encoder_prompt_token_ids is not None:
367
+ num_prompt_tokens += len(res.encoder_prompt_token_ids)
368
+
369
+ # We need to do it here, because if there are exceptions in
370
+ # the result_generator, it needs to be sent as the FIRST
371
+ # response (by the try...catch).
372
+ if first_iteration:
373
+ num_cached_tokens = res.num_cached_tokens
374
+ # Send first response for each request.n (index) with
375
+ # the role
376
+ role = self.get_chat_request_role(request)
377
+
378
+ # NOTE num_choices defaults to 1 so this usually executes
379
+ # once per request
380
+ for i in range(num_choices):
381
+ choice_data = ChatCompletionResponseStreamChoice(
382
+ index=i,
383
+ delta=DeltaMessage(
384
+ role=role,
385
+ content="",
386
+ ),
387
+ logprobs=None,
388
+ finish_reason=None)
389
+ chunk = ChatCompletionStreamResponse(
390
+ id=request_id,
391
+ object=chunk_object_type,
392
+ created=created_time,
393
+ choices=[choice_data],
394
+ model=model_name)
395
+
396
+ # if continuous usage stats are requested, add it
397
+ if include_continuous_usage:
398
+ chunk.usage = UsageInfo(
399
+ prompt_tokens=num_prompt_tokens,
400
+ completion_tokens=0,
401
+ total_tokens=num_prompt_tokens)
402
+
403
+ data = chunk.model_dump_json(exclude_unset=True)
404
+ yield f"data: {data}\n\n"
405
+
406
+ # Send response to echo the input portion of the
407
+ # last message
408
+ if request.echo:
409
+ last_msg_content: Union[str, List[Dict[str, str]]] = ""
410
+ if conversation and "content" in conversation[
411
+ -1] and conversation[-1].get("role") == role:
412
+ last_msg_content = conversation[-1]["content"] or ""
413
+
414
+ if last_msg_content:
415
+ for i in range(num_choices):
416
+ choice_data = (
417
+ ChatCompletionResponseStreamChoice(
418
+ index=i,
419
+ delta=DeltaMessage(
420
+ content=last_msg_content),
421
+ logprobs=None,
422
+ finish_reason=None))
423
+ chunk = ChatCompletionStreamResponse(
424
+ id=request_id,
425
+ object=chunk_object_type,
426
+ created=created_time,
427
+ choices=[choice_data],
428
+ model=model_name)
429
+ if include_continuous_usage:
430
+ chunk.usage = UsageInfo(
431
+ prompt_tokens=num_prompt_tokens,
432
+ completion_tokens=0,
433
+ total_tokens=num_prompt_tokens)
434
+
435
+ data = chunk.model_dump_json(
436
+ exclude_unset=True)
437
+ yield f"data: {data}\n\n"
438
+ first_iteration = False
439
+
440
+ for output in res.outputs:
441
+ i = output.index
442
+ tool_parser = tool_parsers[i]
443
+
444
+ if finish_reason_sent[i]:
445
+ continue
446
+
447
+ if request.logprobs and request.top_logprobs is not None:
448
+ assert output.logprobs is not None, (
449
+ "Did not output logprobs")
450
+ logprobs = self._create_chat_logprobs(
451
+ token_ids=output.token_ids,
452
+ top_logprobs=output.logprobs,
453
+ tokenizer=tokenizer,
454
+ num_output_top_logprobs=request.top_logprobs,
455
+ )
456
+ else:
457
+ logprobs = None
458
+
459
+ delta_text = output.text
460
+
461
+ if not delta_text and not output.token_ids and \
462
+ not previous_num_tokens[i]:
463
+ # Chunked prefill case, don't return empty chunks
464
+ continue
465
+
466
+ delta_message: Optional[DeltaMessage]
467
+
468
+ # handle streaming deltas for tools with named tool_choice
469
+ if tool_choice_function_name:
470
+ delta_message = DeltaMessage(tool_calls=[
471
+ DeltaToolCall(function=DeltaFunctionCall(
472
+ name=tool_choice_function_name,
473
+ arguments=delta_text),
474
+ index=i)
475
+ ])
476
+
477
+ # handle streaming deltas for tools with "auto" tool choice
478
+ elif tool_choice_auto:
479
+ assert previous_texts is not None
480
+ assert all_previous_token_ids is not None
481
+ assert tool_parser is not None
482
+ #TODO optimize manipulation of these lists
483
+ previous_text = previous_texts[i]
484
+ previous_token_ids = all_previous_token_ids[i]
485
+ current_text = previous_text + delta_text
486
+ current_token_ids = previous_token_ids + list(
487
+ output.token_ids)
488
+
489
+ delta_message = (
490
+ tool_parser.extract_tool_calls_streaming(
491
+ previous_text=previous_text,
492
+ current_text=current_text,
493
+ delta_text=delta_text,
494
+ previous_token_ids=previous_token_ids,
495
+ current_token_ids=current_token_ids,
496
+ delta_token_ids=output.token_ids,
497
+ request=request))
498
+
499
+ # update the previous values for the next iteration
500
+ previous_texts[i] = current_text
501
+ all_previous_token_ids[i] = current_token_ids
502
+ # reasoning_content cannot be enabled with tool_choice.
503
+ # If it is, the tool_choice will be used instead.
504
+ elif self.enable_reasoning:
505
+ # handle reasoning_content delta
506
+ assert reasoning_parser is not None
507
+ assert previous_texts is not None
508
+ assert all_previous_token_ids is not None
509
+ previous_text = previous_texts[i]
510
+ previous_token_ids = all_previous_token_ids[i]
511
+ current_text = previous_text + delta_text
512
+ current_token_ids = previous_token_ids + list(
513
+ output.token_ids)
514
+
515
+ delta_message = (reasoning_parser.
516
+ extract_reasoning_content_streaming(
517
+ previous_text,
518
+ current_text,
519
+ delta_text,
520
+ previous_token_ids,
521
+ current_token_ids,
522
+ output.token_ids,
523
+ ))
524
+
525
+ # update the previous values for the next iteration
526
+ previous_texts[i] = current_text
527
+ all_previous_token_ids[i] = current_token_ids
528
+
529
+ # handle streaming just a content delta
530
+ else:
531
+ delta_message = DeltaMessage(content=delta_text)
532
+
533
+ # set the previous values for the next iteration
534
+ previous_num_tokens[i] += len(output.token_ids)
535
+
536
+ # if the message delta is None (e.g. because it was a
537
+ # "control token" for tool calls or the parser otherwise
538
+ # wasn't ready to send a token, then
539
+ # get the next token without streaming a chunk
540
+ if delta_message is None:
541
+ continue
542
+
543
+ if output.finish_reason is None:
544
+ # Send token-by-token response for each request.n
545
+ choice_data = ChatCompletionResponseStreamChoice(
546
+ index=i,
547
+ delta=delta_message,
548
+ logprobs=logprobs,
549
+ finish_reason=None)
550
+
551
+ # if the model is finished generating
552
+ else:
553
+ # check to make sure we haven't "forgotten" to stream
554
+ # any tokens that were generated but previously
555
+ # matched by partial json parsing
556
+ # only happens if we are NOT using guided decoding
557
+ auto_tools_called = False
558
+ if tool_parser:
559
+ auto_tools_called = len(
560
+ tool_parser.prev_tool_call_arr) > 0
561
+ index = len(tool_parser.prev_tool_call_arr
562
+ ) - 1 if auto_tools_called else 0
563
+ else:
564
+ index = 0
565
+
566
+ if self._should_check_for_unstreamed_tool_arg_tokens(
567
+ delta_message, output) and tool_parser:
568
+ latest_delta_len = 0
569
+ if ((isinstance(
570
+ delta_message.tool_calls[0].function,
571
+ DeltaFunctionCall)) and isinstance(
572
+ delta_message.tool_calls[0].function.
573
+ arguments, str)):
574
+ latest_delta_len = len(
575
+ delta_message.tool_calls[0].function.
576
+ arguments)
577
+
578
+ # get the expected call based on partial JSON
579
+ # parsing which "autocompletes" the JSON
580
+ expected_call = json.dumps(
581
+ tool_parser.prev_tool_call_arr[index].get(
582
+ "arguments", {}),
583
+ ensure_ascii=False)
584
+
585
+ # get what we've streamed so far for arguments
586
+ # for the current tool
587
+ actual_call = tool_parser.streamed_args_for_tool[
588
+ index]
589
+ if (latest_delta_len > 0):
590
+ actual_call = actual_call[:-latest_delta_len]
591
+
592
+ # check to see if there's anything left to stream
593
+ remaining_call = expected_call.replace(
594
+ actual_call, "", 1)
595
+ # set that as a delta message
596
+ delta_message = DeltaMessage(tool_calls=[
597
+ DeltaToolCall(index=index,
598
+ function=DeltaFunctionCall(
599
+ arguments=remaining_call).
600
+ model_dump(exclude_none=True))
601
+ ])
602
+
603
+ # Send the finish response for each request.n only once
604
+ choice_data = ChatCompletionResponseStreamChoice(
605
+ index=i,
606
+ delta=delta_message,
607
+ logprobs=logprobs,
608
+ finish_reason=output.finish_reason
609
+ if not auto_tools_called else "tool_calls",
610
+ stop_reason=output.stop_reason)
611
+
612
+ finish_reason_sent[i] = True
613
+
614
+ chunk = ChatCompletionStreamResponse(
615
+ id=request_id,
616
+ object=chunk_object_type,
617
+ created=created_time,
618
+ choices=[choice_data],
619
+ model=model_name)
620
+
621
+ # handle usage stats if requested & if continuous
622
+ if include_continuous_usage:
623
+ completion_tokens = previous_num_tokens[i]
624
+ chunk.usage = UsageInfo(
625
+ prompt_tokens=num_prompt_tokens,
626
+ completion_tokens=completion_tokens,
627
+ total_tokens=num_prompt_tokens + completion_tokens,
628
+ )
629
+
630
+ data = chunk.model_dump_json(exclude_unset=True)
631
+ yield f"data: {data}\n\n"
632
+
633
+ # once the final token is handled, if stream_options.include_usage
634
+ # is sent, send the usage
635
+ if include_usage:
636
+ completion_tokens = sum(previous_num_tokens)
637
+ final_usage = UsageInfo(prompt_tokens=num_prompt_tokens,
638
+ completion_tokens=completion_tokens,
639
+ total_tokens=num_prompt_tokens +
640
+ completion_tokens)
641
+ if self.enable_prompt_tokens_details and num_cached_tokens:
642
+ final_usage.prompt_tokens_details = PromptTokenUsageInfo(
643
+ cached_tokens=num_cached_tokens)
644
+
645
+ final_usage_chunk = ChatCompletionStreamResponse(
646
+ id=request_id,
647
+ object=chunk_object_type,
648
+ created=created_time,
649
+ choices=[],
650
+ model=model_name,
651
+ usage=final_usage)
652
+ final_usage_data = (final_usage_chunk.model_dump_json(
653
+ exclude_unset=True, exclude_none=True))
654
+ yield f"data: {final_usage_data}\n\n"
655
+
656
+ # report to FastAPI middleware aggregate usage across all choices
657
+ num_completion_tokens = sum(previous_num_tokens)
658
+ request_metadata.final_usage_info = UsageInfo(
659
+ prompt_tokens=num_prompt_tokens,
660
+ completion_tokens=num_completion_tokens,
661
+ total_tokens=num_prompt_tokens + num_completion_tokens)
662
+
663
+ except Exception as e:
664
+ # TODO: Use a vllm-specific Validation Error
665
+ logger.exception("Error in chat completion stream generator.")
666
+ data = self.create_streaming_error_response(str(e))
667
+ yield f"data: {data}\n\n"
668
+ # Send the final done message after all response.n are finished
669
+ yield "data: [DONE]\n\n"
670
+
671
+ async def chat_completion_full_generator(
672
+ self,
673
+ request: ChatCompletionRequest,
674
+ result_generator: AsyncIterator[RequestOutput],
675
+ request_id: str,
676
+ model_name: str,
677
+ conversation: List[ConversationMessage],
678
+ tokenizer: AnyTokenizer,
679
+ request_metadata: RequestResponseMetadata,
680
+ ) -> Union[ErrorResponse, ChatCompletionResponse]:
681
+
682
+ created_time = int(time.time())
683
+ final_res: Optional[RequestOutput] = None
684
+
685
+ try:
686
+ async for res in result_generator:
687
+ final_res = res
688
+ except asyncio.CancelledError:
689
+ return self.create_error_response("Client disconnected")
690
+ except ValueError as e:
691
+ # TODO: Use a vllm-specific Validation Error
692
+ return self.create_error_response(str(e))
693
+
694
+ assert final_res is not None
695
+
696
+ choices: List[ChatCompletionResponseChoice] = []
697
+
698
+ role = self.get_chat_request_role(request)
699
+ for output in final_res.outputs:
700
+ token_ids = output.token_ids
701
+ out_logprobs = output.logprobs
702
+
703
+ if request.logprobs and request.top_logprobs is not None:
704
+ assert out_logprobs is not None, "Did not output logprobs"
705
+ logprobs = self._create_chat_logprobs(
706
+ token_ids=token_ids,
707
+ top_logprobs=out_logprobs,
708
+ num_output_top_logprobs=request.top_logprobs,
709
+ tokenizer=tokenizer,
710
+ )
711
+ else:
712
+ logprobs = None
713
+
714
+ should_stream_with_reasoning_parsing = (
715
+ self._should_stream_with_reasoning_parsing(request))
716
+
717
+ # In the OpenAI API the finish_reason is "tools_called"
718
+ # if the tool choice is auto and the model produced a tool
719
+ # call. The same is not true for named function calls
720
+ auto_tools_called = False
721
+
722
+ if should_stream_with_reasoning_parsing and \
723
+ self.reasoning_parser is not None:
724
+ try:
725
+ reasoning_parser = self.reasoning_parser(tokenizer)
726
+ except RuntimeError as e:
727
+ logger.exception("Error in reasoning parser creation.")
728
+ return self.create_error_response(str(e))
729
+
730
+ reasoning_content, content = (
731
+ reasoning_parser.extract_reasoning_content(
732
+ output.text, request=request))
733
+
734
+ if reasoning_content:
735
+ message = ChatMessage(role=role,
736
+ content=content,
737
+ reasoning_content=reasoning_content)
738
+ else:
739
+ message = ChatMessage(role=role, content=output.text)
740
+
741
+ # if auto tools are not enabled, and a named tool choice using
742
+ # outlines is not being used
743
+ elif (not self.enable_auto_tools
744
+ or not self.tool_parser) and not isinstance(
745
+ request.tool_choice, ChatCompletionNamedToolChoiceParam):
746
+ message = ChatMessage(role=role, content=output.text)
747
+
748
+ # if the request uses tools and specified a tool choice
749
+ elif request.tool_choice and type(
750
+ request.tool_choice) is ChatCompletionNamedToolChoiceParam:
751
+
752
+ tool_call_class = MistralToolCall if isinstance(
753
+ tokenizer, MistralTokenizer) else ToolCall
754
+ message = ChatMessage(
755
+ role=role,
756
+ content="",
757
+ tool_calls=[
758
+ tool_call_class(function=FunctionCall(
759
+ name=request.tool_choice.function.name,
760
+ arguments=output.text))
761
+ ])
762
+
763
+ # if the request doesn't use tool choice
764
+ # OR specifies to not use a tool
765
+ elif not request.tool_choice or request.tool_choice == "none":
766
+
767
+ message = ChatMessage(role=role, content=output.text)
768
+
769
+ # handle when there are tools and tool choice is auto
770
+ elif request.tools and (
771
+ request.tool_choice == "auto"
772
+ or request.tool_choice is None) and self.enable_auto_tools \
773
+ and self.tool_parser:
774
+
775
+ try:
776
+ tool_parser = self.tool_parser(tokenizer)
777
+ except RuntimeError as e:
778
+ logger.exception("Error in tool parser creation.")
779
+ return self.create_error_response(str(e))
780
+
781
+ tool_call_info = tool_parser.extract_tool_calls(
782
+ output.text, request=request)
783
+ # In the OpenAI API the finish_reason is "tools_called"
784
+ # if the tool choice is auto and the model produced a tool
785
+ # call. The same is not true for named function calls
786
+ auto_tools_called = tool_call_info.tools_called
787
+ if tool_call_info.tools_called:
788
+ message = ChatMessage(role=role,
789
+ content=tool_call_info.content,
790
+ tool_calls=tool_call_info.tool_calls)
791
+
792
+ else:
793
+ # FOR NOW make it a chat message; we will have to detect
794
+ # the type to make it later.
795
+ message = ChatMessage(role=role, content=output.text)
796
+
797
+ # undetermined case that is still important to handle
798
+ else:
799
+ logger.error(
800
+ "Error in chat_completion_full_generator - cannot determine"
801
+ " if tools should be extracted. Returning a standard chat "
802
+ "completion.")
803
+ message = ChatMessage(role=role, content=output.text)
804
+
805
+ choice_data = ChatCompletionResponseChoice(
806
+ index=output.index,
807
+ message=message,
808
+ logprobs=logprobs,
809
+ finish_reason="tool_calls" if auto_tools_called else
810
+ output.finish_reason if output.finish_reason else "stop",
811
+ stop_reason=output.stop_reason)
812
+ choices.append(choice_data)
813
+
814
+ if request.echo:
815
+ last_msg_content: Union[str, List[Dict[str, str]]] = ""
816
+ if conversation and "content" in conversation[-1] and conversation[
817
+ -1].get("role") == role:
818
+ last_msg_content = conversation[-1]["content"] or ""
819
+ if isinstance(last_msg_content, list):
820
+ last_msg_content = "\n".join(msg['text']
821
+ for msg in last_msg_content)
822
+
823
+ for choice in choices:
824
+ full_message = last_msg_content + (choice.message.content
825
+ or "")
826
+ choice.message.content = full_message
827
+
828
+ assert final_res.prompt_token_ids is not None
829
+ num_prompt_tokens = len(final_res.prompt_token_ids)
830
+ if final_res.encoder_prompt_token_ids is not None:
831
+ num_prompt_tokens += len(final_res.encoder_prompt_token_ids)
832
+ num_generated_tokens = sum(
833
+ len(output.token_ids) for output in final_res.outputs)
834
+ usage = UsageInfo(prompt_tokens=num_prompt_tokens,
835
+ completion_tokens=num_generated_tokens,
836
+ total_tokens=num_prompt_tokens +
837
+ num_generated_tokens)
838
+ if self.enable_prompt_tokens_details and final_res.num_cached_tokens:
839
+ usage.prompt_tokens_details = PromptTokenUsageInfo(
840
+ cached_tokens=final_res.num_cached_tokens)
841
+
842
+ request_metadata.final_usage_info = usage
843
+
844
+ response = ChatCompletionResponse(
845
+ id=request_id,
846
+ created=created_time,
847
+ model=model_name,
848
+ choices=choices,
849
+ usage=usage,
850
+ prompt_logprobs=final_res.prompt_logprobs,
851
+ )
852
+
853
+ return response
854
+
855
+ def _get_top_logprobs(
856
+ self, logprobs: Dict[int, Logprob], top_logprobs: Optional[int],
857
+ tokenizer: AnyTokenizer) -> List[ChatCompletionLogProb]:
858
+ return [
859
+ ChatCompletionLogProb(token=(token := self._get_decoded_token(
860
+ p[1],
861
+ p[0],
862
+ tokenizer,
863
+ return_as_token_id=self.return_tokens_as_token_ids)),
864
+ logprob=max(p[1].logprob, -9999.0),
865
+ bytes=list(
866
+ token.encode("utf-8", errors="replace")))
867
+ for i, p in enumerate(logprobs.items())
868
+ if top_logprobs and i < top_logprobs
869
+ ]
870
+
871
+ def _create_chat_logprobs(
872
+ self,
873
+ token_ids: GenericSequence[int],
874
+ top_logprobs: GenericSequence[Optional[Dict[int, Logprob]]],
875
+ tokenizer: AnyTokenizer,
876
+ num_output_top_logprobs: Optional[int] = None,
877
+ ) -> ChatCompletionLogProbs:
878
+ """Create OpenAI-style logprobs."""
879
+ logprobs_content: List[ChatCompletionLogProbsContent] = []
880
+
881
+ for i, token_id in enumerate(token_ids):
882
+ step_top_logprobs = top_logprobs[i]
883
+ if step_top_logprobs is None:
884
+ token = tokenizer.decode(token_id)
885
+ if self.return_tokens_as_token_ids:
886
+ token = f"token_id:{token_id}"
887
+
888
+ logprobs_content.append(
889
+ ChatCompletionLogProbsContent(
890
+ token=token,
891
+ bytes=list(token.encode("utf-8", errors="replace")),
892
+ ))
893
+ else:
894
+ step_token = step_top_logprobs[token_id]
895
+ step_decoded = step_token.decoded_token
896
+
897
+ logprobs_content.append(
898
+ ChatCompletionLogProbsContent(
899
+ token=self._get_decoded_token(
900
+ step_token,
901
+ token_id,
902
+ tokenizer,
903
+ self.return_tokens_as_token_ids,
904
+ ),
905
+ logprob=max(step_token.logprob, -9999.0),
906
+ bytes=None if step_decoded is None else list(
907
+ step_decoded.encode("utf-8", errors="replace")),
908
+ top_logprobs=self._get_top_logprobs(
909
+ step_top_logprobs,
910
+ num_output_top_logprobs,
911
+ tokenizer,
912
+ ),
913
+ ))
914
+
915
+ return ChatCompletionLogProbs(content=logprobs_content)
916
+
917
+ def _should_stream_with_auto_tool_parsing(self,
918
+ request: ChatCompletionRequest):
919
+ """
920
+ Utility function to check if streamed tokens should go through the tool
921
+ call parser that was configured.
922
+
923
+ We only want to do this IF user-provided tools are set, a tool parser
924
+ is configured, "auto" tool choice is enabled, and the request's tool
925
+ choice field indicates that "auto" tool choice should be used.
926
+ """
927
+ return (request.tools and self.tool_parser and self.enable_auto_tools
928
+ and request.tool_choice in ['auto', None])
929
+
930
+ def _should_stream_with_reasoning_parsing(self,
931
+ request: ChatCompletionRequest):
932
+ """
933
+ Utility function to check if streamed tokens should go through the
934
+ reasoning parser that was configured.
935
+
936
+ We only want to do this IF reasoning is enabled and a reasoning
937
+ parser is configured.
938
+ """
939
+ return self.enable_reasoning and self.reasoning_parser is not None
940
+
941
+ def _should_check_for_unstreamed_tool_arg_tokens(
942
+ self,
943
+ delta_message: Optional[DeltaMessage],
944
+ output: CompletionOutput,
945
+ ) -> bool:
946
+ """
947
+ Check to see if we should check for unstreamed tool arguments tokens.
948
+ This is only applicable when auto tool parsing is enabled, the delta
949
+ is a tool call with arguments.
950
+ """
951
+
952
+ # yapf: disable
953
+ return bool(
954
+ # if there is a delta message that includes tool calls which
955
+ # include a function that has arguments
956
+ output.finish_reason is not None
957
+ and self.enable_auto_tools and self.tool_parser and delta_message
958
+ and delta_message.tool_calls and delta_message.tool_calls[0]
959
+ and delta_message.tool_calls[0].function
960
+ and delta_message.tool_calls[0].function.arguments is not None
961
+ )
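The chat handler above streams responses as Server-Sent Events: each chunk is a line of the form "data: <ChatCompletionStreamResponse JSON>" followed by a blank line, with an optional usage-only chunk (empty choices list) when stream_options.include_usage is set, and a final "data: [DONE]" sentinel. A minimal client-side sketch using the official openai Python package is shown below; the base URL, port, API key, and model name are assumptions about a locally running OpenAI-compatible vLLM server, not values taken from this file.

# stream_chat.py -- consume the SSE stream produced by the chat handler above
from openai import OpenAI

# Assumes an OpenAI-compatible vLLM server is listening locally (assumption).
client = OpenAI(base_url="http://localhost:8000/v1", api_key="EMPTY")

stream = client.chat.completions.create(
    model="meta-llama/Llama-3.1-8B-Instruct",  # placeholder served model name
    messages=[{"role": "user", "content": "Write a haiku about GPUs."}],
    stream=True,
    stream_options={"include_usage": True},  # requests the final usage-only chunk
)

for chunk in stream:
    if chunk.choices:  # regular delta chunks
        print(chunk.choices[0].delta.content or "", end="", flush=True)
    elif chunk.usage:  # final usage-only chunk (empty choices)
        print(f"\n[prompt_tokens={chunk.usage.prompt_tokens}, "
              f"completion_tokens={chunk.usage.completion_tokens}]")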
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_completion.py ADDED
@@ -0,0 +1,547 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ import asyncio
4
+ import time
5
+ from typing import AsyncGenerator, AsyncIterator, Dict, List, Optional
6
+ from typing import Sequence as GenericSequence
7
+ from typing import Tuple, Union, cast
8
+
9
+ from fastapi import Request
10
+
11
+ from vllm.config import ModelConfig
12
+ from vllm.engine.protocol import EngineClient
13
+ from vllm.entrypoints.logger import RequestLogger
14
+ # yapf conflicts with isort for this block
15
+ # yapf: disable
16
+ from vllm.entrypoints.openai.protocol import (CompletionLogProbs,
17
+ CompletionRequest,
18
+ CompletionResponse,
19
+ CompletionResponseChoice,
20
+ CompletionResponseStreamChoice,
21
+ CompletionStreamResponse,
22
+ ErrorResponse,
23
+ RequestResponseMetadata,
24
+ UsageInfo)
25
+ # yapf: enable
26
+ from vllm.entrypoints.openai.serving_engine import OpenAIServing
27
+ from vllm.entrypoints.openai.serving_models import OpenAIServingModels
28
+ from vllm.logger import init_logger
29
+ from vllm.outputs import RequestOutput
30
+ from vllm.sampling_params import BeamSearchParams, SamplingParams
31
+ from vllm.sequence import Logprob
32
+ from vllm.transformers_utils.tokenizer import AnyTokenizer
33
+ from vllm.utils import merge_async_iterators
34
+
35
+ logger = init_logger(__name__)
36
+
37
+
38
+ class OpenAIServingCompletion(OpenAIServing):
39
+
40
+ def __init__(
41
+ self,
42
+ engine_client: EngineClient,
43
+ model_config: ModelConfig,
44
+ models: OpenAIServingModels,
45
+ *,
46
+ request_logger: Optional[RequestLogger],
47
+ return_tokens_as_token_ids: bool = False,
48
+ ):
49
+ super().__init__(engine_client=engine_client,
50
+ model_config=model_config,
51
+ models=models,
52
+ request_logger=request_logger,
53
+ return_tokens_as_token_ids=return_tokens_as_token_ids)
54
+ diff_sampling_param = self.model_config.get_diff_sampling_param()
55
+ if diff_sampling_param:
56
+ logger.info(
57
+ "Overwriting default completion sampling param with: %s",
58
+ diff_sampling_param)
59
+
60
+ async def create_completion(
61
+ self,
62
+ request: CompletionRequest,
63
+ raw_request: Optional[Request] = None,
64
+ ) -> Union[AsyncGenerator[str, None], CompletionResponse, ErrorResponse]:
65
+ """Completion API similar to OpenAI's API.
66
+
67
+ See https://platform.openai.com/docs/api-reference/completions/create
68
+ for the API specification. This API mimics the OpenAI Completion API.
69
+
70
+ NOTE: Currently we do not support the following feature:
71
+ - suffix (the language models we currently support do not support
72
+ suffix)
73
+ """
74
+ error_check_ret = await self._check_model(request)
75
+ if error_check_ret is not None:
76
+ return error_check_ret
77
+
78
+ # If the engine is dead, raise the engine's DEAD_ERROR.
79
+ # This is required for the streaming case, where we return a
80
+ # success status before we actually start generating text :).
81
+ if self.engine_client.errored:
82
+ raise self.engine_client.dead_error
83
+
84
+ # Return error for unsupported features.
85
+ if request.suffix is not None:
86
+ return self.create_error_response(
87
+ "suffix is not currently supported")
88
+
89
+ request_id = f"cmpl-{self._base_request_id(raw_request)}"
90
+ created_time = int(time.time())
91
+
92
+ request_metadata = RequestResponseMetadata(request_id=request_id)
93
+ if raw_request:
94
+ raw_request.state.request_metadata = request_metadata
95
+
96
+ try:
97
+ (
98
+ lora_request,
99
+ prompt_adapter_request,
100
+ ) = self._maybe_get_adapters(request)
101
+
102
+ tokenizer = await self.engine_client.get_tokenizer(lora_request)
103
+
104
+ request_prompts, engine_prompts = await self._preprocess_completion(
105
+ request,
106
+ tokenizer,
107
+ request.prompt,
108
+ truncate_prompt_tokens=request.truncate_prompt_tokens,
109
+ add_special_tokens=request.add_special_tokens,
110
+ )
111
+ except ValueError as e:
112
+ logger.exception("Error in preprocessing prompt inputs")
113
+ return self.create_error_response(str(e))
114
+
115
+ # Schedule the request and get the result generator.
116
+ generators: List[AsyncGenerator[RequestOutput, None]] = []
117
+ try:
118
+ for i, engine_prompt in enumerate(engine_prompts):
119
+ sampling_params: Union[SamplingParams, BeamSearchParams]
120
+ default_max_tokens = self.max_model_len - len(
121
+ engine_prompt["prompt_token_ids"])
122
+ # Build default sampling params
123
+ default_sampling_params = (
124
+ self.model_config.get_diff_sampling_param())
125
+ if request.use_beam_search:
126
+ sampling_params = request.to_beam_search_params(
127
+ default_max_tokens, default_sampling_params)
128
+ else:
129
+ sampling_params = request.to_sampling_params(
130
+ default_max_tokens,
131
+ self.model_config.logits_processor_pattern,
132
+ default_sampling_params)
133
+
134
+ request_id_item = f"{request_id}-{i}"
135
+
136
+ self._log_inputs(request_id_item,
137
+ request_prompts[i],
138
+ params=sampling_params,
139
+ lora_request=lora_request,
140
+ prompt_adapter_request=prompt_adapter_request)
141
+
142
+ trace_headers = (None if raw_request is None else await
143
+ self._get_trace_headers(raw_request.headers))
144
+
145
+ if isinstance(sampling_params, BeamSearchParams):
146
+ generator = self.engine_client.beam_search(
147
+ prompt=engine_prompt,
148
+ request_id=request_id,
149
+ params=sampling_params,
150
+ )
151
+ else:
152
+ generator = self.engine_client.generate(
153
+ engine_prompt,
154
+ sampling_params,
155
+ request_id_item,
156
+ lora_request=lora_request,
157
+ prompt_adapter_request=prompt_adapter_request,
158
+ trace_headers=trace_headers,
159
+ priority=request.priority,
160
+ )
161
+
162
+ generators.append(generator)
163
+ except ValueError as e:
164
+ # TODO: Use a vllm-specific Validation Error
165
+ return self.create_error_response(str(e))
166
+
167
+ result_generator = merge_async_iterators(*generators)
168
+
169
+ model_name = self.models.model_name(lora_request)
170
+ num_prompts = len(engine_prompts)
171
+
172
+ # Similar to the OpenAI API, when n != best_of, we do not stream the
173
+ # results. In addition, we do not stream the results when use
174
+ # beam search.
175
+ stream = (request.stream
176
+ and (request.best_of is None or request.n == request.best_of)
177
+ and not request.use_beam_search)
178
+
179
+ # Streaming response
180
+ if stream:
181
+ return self.completion_stream_generator(
182
+ request,
183
+ result_generator,
184
+ request_id,
185
+ created_time,
186
+ model_name,
187
+ num_prompts=num_prompts,
188
+ tokenizer=tokenizer,
189
+ request_metadata=request_metadata)
190
+
191
+ # Non-streaming response
192
+ final_res_batch: List[Optional[RequestOutput]] = [None] * num_prompts
193
+ try:
194
+ async for i, res in result_generator:
195
+ final_res_batch[i] = res
196
+
197
+ for i, final_res in enumerate(final_res_batch):
198
+ assert final_res is not None
199
+
200
+ # The output should contain the input text
201
+ # We did not pass it into vLLM engine to avoid being redundant
202
+ # with the inputs token IDs
203
+ if final_res.prompt is None:
204
+ final_res.prompt = request_prompts[i]["prompt"]
205
+
206
+ final_res_batch_checked = cast(List[RequestOutput],
207
+ final_res_batch)
208
+
209
+ response = self.request_output_to_completion_response(
210
+ final_res_batch_checked,
211
+ request,
212
+ request_id,
213
+ created_time,
214
+ model_name,
215
+ tokenizer,
216
+ request_metadata,
217
+ )
218
+ except asyncio.CancelledError:
219
+ return self.create_error_response("Client disconnected")
220
+ except ValueError as e:
221
+ # TODO: Use a vllm-specific Validation Error
222
+ return self.create_error_response(str(e))
223
+
224
+ # When user requests streaming but we don't stream, we still need to
225
+ # return a streaming response with a single event.
226
+ if request.stream:
227
+ response_json = response.model_dump_json()
228
+
229
+ async def fake_stream_generator() -> AsyncGenerator[str, None]:
230
+ yield f"data: {response_json}\n\n"
231
+ yield "data: [DONE]\n\n"
232
+
233
+ return fake_stream_generator()
234
+
235
+ return response
236
+
237
+ async def completion_stream_generator(
238
+ self,
239
+ request: CompletionRequest,
240
+ result_generator: AsyncIterator[Tuple[int, RequestOutput]],
241
+ request_id: str,
242
+ created_time: int,
243
+ model_name: str,
244
+ num_prompts: int,
245
+ tokenizer: AnyTokenizer,
246
+ request_metadata: RequestResponseMetadata,
247
+ ) -> AsyncGenerator[str, None]:
248
+ num_choices = 1 if request.n is None else request.n
249
+ previous_text_lens = [0] * num_choices * num_prompts
250
+ previous_num_tokens = [0] * num_choices * num_prompts
251
+ has_echoed = [False] * num_choices * num_prompts
252
+ num_prompt_tokens = [0] * num_prompts
253
+
254
+ stream_options = request.stream_options
255
+ if stream_options:
256
+ include_usage = stream_options.include_usage
257
+ include_continuous_usage = include_usage and \
258
+ stream_options.continuous_usage_stats
259
+ else:
260
+ include_usage, include_continuous_usage = False, False
261
+
262
+ try:
263
+ async for prompt_idx, res in result_generator:
264
+ prompt_token_ids = res.prompt_token_ids
265
+ prompt_logprobs = res.prompt_logprobs
266
+ prompt_text = res.prompt
267
+
268
+ # Prompt details are excluded from later streamed outputs
269
+ if res.prompt_token_ids is not None:
270
+ num_prompt_tokens[prompt_idx] = len(res.prompt_token_ids)
271
+
272
+ delta_token_ids: GenericSequence[int]
273
+ out_logprobs: Optional[GenericSequence[Optional[Dict[
274
+ int, Logprob]]]]
275
+
276
+ for output in res.outputs:
277
+ i = output.index + prompt_idx * num_choices
278
+
279
+ assert request.max_tokens is not None
280
+ if request.echo and not has_echoed[i]:
281
+ assert prompt_token_ids is not None
282
+ assert prompt_text is not None
283
+ if request.max_tokens == 0:
284
+ # only return the prompt
285
+ delta_text = prompt_text
286
+ delta_token_ids = prompt_token_ids
287
+ out_logprobs = prompt_logprobs
288
+ else:
289
+ assert prompt_logprobs is not None
290
+ # echo the prompt and first token
291
+ delta_text = prompt_text + output.text
292
+ delta_token_ids = [
293
+ *prompt_token_ids, *output.token_ids
294
+ ]
295
+ out_logprobs = [
296
+ *prompt_logprobs,
297
+ *(output.logprobs or []),
298
+ ]
299
+ has_echoed[i] = True
300
+ else:
301
+ # return just the delta
302
+ delta_text = output.text
303
+ delta_token_ids = output.token_ids
304
+ out_logprobs = output.logprobs
305
+
306
+ if not delta_text and not delta_token_ids \
307
+ and not previous_num_tokens[i]:
308
+ # Chunked prefill case, don't return empty chunks
309
+ continue
310
+
311
+ if request.logprobs is not None:
312
+ assert out_logprobs is not None, (
313
+ "Did not output logprobs")
314
+ logprobs = self._create_completion_logprobs(
315
+ token_ids=delta_token_ids,
316
+ top_logprobs=out_logprobs,
317
+ num_output_top_logprobs=request.logprobs,
318
+ tokenizer=tokenizer,
319
+ initial_text_offset=previous_text_lens[i],
320
+ )
321
+ else:
322
+ logprobs = None
323
+
324
+ previous_text_lens[i] += len(output.text)
325
+ previous_num_tokens[i] += len(output.token_ids)
326
+ finish_reason = output.finish_reason
327
+ stop_reason = output.stop_reason
328
+
329
+ chunk = CompletionStreamResponse(
330
+ id=request_id,
331
+ created=created_time,
332
+ model=model_name,
333
+ choices=[
334
+ CompletionResponseStreamChoice(
335
+ index=i,
336
+ text=delta_text,
337
+ logprobs=logprobs,
338
+ finish_reason=finish_reason,
339
+ stop_reason=stop_reason,
340
+ )
341
+ ])
342
+ if include_continuous_usage:
343
+ prompt_tokens = num_prompt_tokens[prompt_idx]
344
+ completion_tokens = previous_num_tokens[i]
345
+ chunk.usage = UsageInfo(
346
+ prompt_tokens=prompt_tokens,
347
+ completion_tokens=completion_tokens,
348
+ total_tokens=prompt_tokens + completion_tokens,
349
+ )
350
+
351
+ response_json = chunk.model_dump_json(exclude_unset=False)
352
+ yield f"data: {response_json}\n\n"
353
+
354
+ total_prompt_tokens = sum(num_prompt_tokens)
355
+ total_completion_tokens = sum(previous_num_tokens)
356
+ final_usage_info = UsageInfo(
357
+ prompt_tokens=total_prompt_tokens,
358
+ completion_tokens=total_completion_tokens,
359
+ total_tokens=total_prompt_tokens + total_completion_tokens)
360
+
361
+ if include_usage:
362
+ final_usage_chunk = CompletionStreamResponse(
363
+ id=request_id,
364
+ created=created_time,
365
+ model=model_name,
366
+ choices=[],
367
+ usage=final_usage_info,
368
+ )
369
+ final_usage_data = (final_usage_chunk.model_dump_json(
370
+ exclude_unset=False, exclude_none=True))
371
+ yield f"data: {final_usage_data}\n\n"
372
+
373
+ # report to FastAPI middleware aggregate usage across all choices
374
+ request_metadata.final_usage_info = final_usage_info
375
+
376
+ except Exception as e:
377
+ # TODO: Use a vllm-specific Validation Error
378
+ data = self.create_streaming_error_response(str(e))
379
+ yield f"data: {data}\n\n"
380
+ yield "data: [DONE]\n\n"
381
+
382
+ def request_output_to_completion_response(
383
+ self,
384
+ final_res_batch: List[RequestOutput],
385
+ request: CompletionRequest,
386
+ request_id: str,
387
+ created_time: int,
388
+ model_name: str,
389
+ tokenizer: AnyTokenizer,
390
+ request_metadata: RequestResponseMetadata,
391
+ ) -> CompletionResponse:
392
+ choices: List[CompletionResponseChoice] = []
393
+ num_prompt_tokens = 0
394
+ num_generated_tokens = 0
395
+
396
+ for final_res in final_res_batch:
397
+ prompt_token_ids = final_res.prompt_token_ids
398
+ assert prompt_token_ids is not None
399
+ prompt_logprobs = final_res.prompt_logprobs
400
+ if prompt_logprobs:
401
+ for logprob_dict in prompt_logprobs:
402
+ if logprob_dict:
403
+ for logprob_values in logprob_dict.values():
404
+ if logprob_values.logprob == float('-inf'):
405
+ logprob_values.logprob = -9999.0
406
+ prompt_text = final_res.prompt
407
+
408
+ token_ids: GenericSequence[int]
409
+ out_logprobs: Optional[GenericSequence[Optional[Dict[int,
410
+ Logprob]]]]
411
+
412
+ for output in final_res.outputs:
413
+ assert request.max_tokens is not None
414
+ if request.echo:
415
+ assert prompt_text is not None
416
+ if request.max_tokens == 0:
417
+ token_ids = prompt_token_ids
418
+ out_logprobs = prompt_logprobs
419
+ output_text = prompt_text
420
+ else:
421
+ token_ids = [*prompt_token_ids, *output.token_ids]
422
+
423
+ if request.logprobs is None:
424
+ out_logprobs = None
425
+ else:
426
+ assert prompt_logprobs is not None
427
+ assert output.logprobs is not None
428
+ out_logprobs = [
429
+ *prompt_logprobs,
430
+ *output.logprobs,
431
+ ]
432
+
433
+ output_text = prompt_text + output.text
434
+ else:
435
+ token_ids = output.token_ids
436
+ out_logprobs = output.logprobs
437
+ output_text = output.text
438
+
439
+ if request.logprobs is not None:
440
+ assert out_logprobs is not None, "Did not output logprobs"
441
+ logprobs = self._create_completion_logprobs(
442
+ token_ids=token_ids,
443
+ top_logprobs=out_logprobs,
444
+ tokenizer=tokenizer,
445
+ num_output_top_logprobs=request.logprobs,
446
+ )
447
+ else:
448
+ logprobs = None
449
+
450
+ choice_data = CompletionResponseChoice(
451
+ index=len(choices),
452
+ text=output_text,
453
+ logprobs=logprobs,
454
+ finish_reason=output.finish_reason,
455
+ stop_reason=output.stop_reason,
456
+ prompt_logprobs=final_res.prompt_logprobs,
457
+ )
458
+ choices.append(choice_data)
459
+
460
+ num_generated_tokens += len(output.token_ids)
461
+
462
+ num_prompt_tokens += len(prompt_token_ids)
463
+
464
+ usage = UsageInfo(
465
+ prompt_tokens=num_prompt_tokens,
466
+ completion_tokens=num_generated_tokens,
467
+ total_tokens=num_prompt_tokens + num_generated_tokens,
468
+ )
469
+
470
+ request_metadata.final_usage_info = usage
471
+
472
+ return CompletionResponse(
473
+ id=request_id,
474
+ created=created_time,
475
+ model=model_name,
476
+ choices=choices,
477
+ usage=usage,
478
+ )
479
+
480
+ def _create_completion_logprobs(
481
+ self,
482
+ token_ids: GenericSequence[int],
483
+ top_logprobs: GenericSequence[Optional[Dict[int, Logprob]]],
484
+ num_output_top_logprobs: int,
485
+ tokenizer: AnyTokenizer,
486
+ initial_text_offset: int = 0,
487
+ ) -> CompletionLogProbs:
488
+ """Create logprobs for OpenAI Completion API."""
489
+ out_text_offset: List[int] = []
490
+ out_token_logprobs: List[Optional[float]] = []
491
+ out_tokens: List[str] = []
492
+ out_top_logprobs: List[Optional[Dict[str, float]]] = []
493
+
494
+ last_token_len = 0
495
+
496
+ for i, token_id in enumerate(token_ids):
497
+ step_top_logprobs = top_logprobs[i]
498
+ if step_top_logprobs is None:
499
+ token = tokenizer.decode(token_id)
500
+ if self.return_tokens_as_token_ids:
501
+ token = f"token_id:{token_id}"
502
+
503
+ out_tokens.append(token)
504
+ out_token_logprobs.append(None)
505
+ out_top_logprobs.append(None)
506
+ else:
507
+ step_token = step_top_logprobs[token_id]
508
+
509
+ token = self._get_decoded_token(
510
+ step_token,
511
+ token_id,
512
+ tokenizer,
513
+ return_as_token_id=self.return_tokens_as_token_ids,
514
+ )
515
+ token_logprob = max(step_token.logprob, -9999.0)
516
+
517
+ out_tokens.append(token)
518
+ out_token_logprobs.append(token_logprob)
519
+
520
+ # makes sure to add the top num_output_top_logprobs + 1
521
+ # logprobs, as defined in the openai API
522
+ # (cf. https://github.com/openai/openai-openapi/blob/
523
+ # 893ba52242dbd5387a97b96444ee1c742cfce9bd/openapi.yaml#L7153)
524
+ out_top_logprobs.append({
525
+ # Convert float("-inf") to the
526
+ # JSON-serializable float that OpenAI uses
527
+ self._get_decoded_token(top_lp[1],
528
+ top_lp[0],
529
+ tokenizer,
530
+ return_as_token_id=self.return_tokens_as_token_ids):
531
+ max(top_lp[1].logprob, -9999.0)
532
+ for i, top_lp in enumerate(step_top_logprobs.items())
533
+ if num_output_top_logprobs >= i
534
+ })
535
+
536
+ if len(out_text_offset) == 0:
537
+ out_text_offset.append(initial_text_offset)
538
+ else:
539
+ out_text_offset.append(out_text_offset[-1] + last_token_len)
540
+ last_token_len = len(token)
541
+
542
+ return CompletionLogProbs(
543
+ text_offset=out_text_offset,
544
+ token_logprobs=out_token_logprobs,
545
+ tokens=out_tokens,
546
+ top_logprobs=out_top_logprobs,
547
+ )
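
The streaming path above emits Server-Sent Events: one "data: <json>" frame per chunk, an optional usage-only chunk (empty choices) when stream_options.include_usage is set, and a terminating "data: [DONE]" frame. The sketch below is a minimal, illustrative client for that protocol; the server URL and model name are assumptions, not part of this diff.

# Hypothetical client for the completion SSE stream produced above.
import json
import requests

resp = requests.post(
    "http://localhost:8000/v1/completions",  # assumed local vLLM server
    json={
        "model": "my-model",                  # hypothetical model name
        "prompt": "Hello",
        "max_tokens": 16,
        "stream": True,
        # Request a final usage-only chunk, mirroring include_usage above.
        "stream_options": {"include_usage": True},
    },
    stream=True,
)

for raw_line in resp.iter_lines():
    if not raw_line:
        continue
    line = raw_line.decode("utf-8")
    if not line.startswith("data: "):
        continue
    payload = line[len("data: "):]
    if payload == "[DONE]":                   # terminal sentinel emitted above
        break
    chunk = json.loads(payload)
    if chunk.get("usage") and not chunk.get("choices"):
        # Final usage-only chunk (choices=[]) sent when include_usage is set.
        print("usage:", chunk["usage"])
    else:
        for choice in chunk.get("choices", []):
            print(choice.get("text", ""), end="")
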
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_embedding.py ADDED
@@ -0,0 +1,242 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ import asyncio
4
+ import base64
5
+ import time
6
+ from typing import AsyncGenerator, Final, List, Literal, Optional, Union, cast
7
+
8
+ import numpy as np
9
+ from fastapi import Request
10
+ from typing_extensions import assert_never
11
+
12
+ from vllm.config import ModelConfig
13
+ from vllm.engine.protocol import EngineClient
14
+ from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption
15
+ from vllm.entrypoints.logger import RequestLogger
16
+ from vllm.entrypoints.openai.protocol import (EmbeddingChatRequest,
17
+ EmbeddingRequest,
18
+ EmbeddingResponse,
19
+ EmbeddingResponseData,
20
+ ErrorResponse, UsageInfo)
21
+ from vllm.entrypoints.openai.serving_engine import OpenAIServing
22
+ from vllm.entrypoints.openai.serving_models import OpenAIServingModels
23
+ from vllm.logger import init_logger
24
+ from vllm.outputs import (EmbeddingOutput, EmbeddingRequestOutput,
25
+ PoolingRequestOutput)
26
+ from vllm.utils import merge_async_iterators
27
+
28
+ logger = init_logger(__name__)
29
+
30
+
31
+ def _get_embedding(
32
+ output: EmbeddingOutput,
33
+ encoding_format: Literal["float", "base64"],
34
+ ) -> Union[List[float], str]:
35
+ if encoding_format == "float":
36
+ return output.embedding
37
+ elif encoding_format == "base64":
38
+ # Force to use float32 for base64 encoding
39
+ # to match the OpenAI python client behavior
40
+ embedding_bytes = np.array(output.embedding, dtype="float32").tobytes()
41
+ return base64.b64encode(embedding_bytes).decode("utf-8")
42
+
43
+ assert_never(encoding_format)
44
+
45
+
46
+ class OpenAIServingEmbedding(OpenAIServing):
47
+
48
+ def __init__(
49
+ self,
50
+ engine_client: EngineClient,
51
+ model_config: ModelConfig,
52
+ models: OpenAIServingModels,
53
+ *,
54
+ request_logger: Optional[RequestLogger],
55
+ chat_template: Optional[str],
56
+ chat_template_content_format: ChatTemplateContentFormatOption,
57
+ ) -> None:
58
+ super().__init__(engine_client=engine_client,
59
+ model_config=model_config,
60
+ models=models,
61
+ request_logger=request_logger)
62
+
63
+ self.chat_template = chat_template
64
+ self.chat_template_content_format: Final = chat_template_content_format
65
+
66
+ async def create_embedding(
67
+ self,
68
+ request: EmbeddingRequest,
69
+ raw_request: Optional[Request] = None,
70
+ ) -> Union[EmbeddingResponse, ErrorResponse]:
71
+ """
72
+ Embedding API similar to OpenAI's API.
73
+
74
+ See https://platform.openai.com/docs/api-reference/embeddings/create
75
+ for the API specification. This API mimics the OpenAI Embedding API.
76
+ """
77
+ error_check_ret = await self._check_model(request)
78
+ if error_check_ret is not None:
79
+ return error_check_ret
80
+
81
+ encoding_format = request.encoding_format
82
+ if request.dimensions is not None:
83
+ return self.create_error_response(
84
+ "dimensions is currently not supported")
85
+
86
+ model_name = request.model
87
+ request_id = f"embd-{self._base_request_id(raw_request)}"
88
+ created_time = int(time.time())
89
+
90
+ truncate_prompt_tokens = None
91
+
92
+ if request.truncate_prompt_tokens is not None:
93
+ if request.truncate_prompt_tokens <= self.max_model_len:
94
+ truncate_prompt_tokens = request.truncate_prompt_tokens
95
+ else:
96
+ return self.create_error_response(
97
+ "truncate_prompt_tokens value is "
98
+ "greater than max_model_len."
99
+ " Please, select a smaller truncation size.")
100
+
101
+ try:
102
+ (
103
+ lora_request,
104
+ prompt_adapter_request,
105
+ ) = self._maybe_get_adapters(request)
106
+
107
+ tokenizer = await self.engine_client.get_tokenizer(lora_request)
108
+
109
+ if prompt_adapter_request is not None:
110
+ raise NotImplementedError("Prompt adapter is not supported "
111
+ "for embedding models")
112
+
113
+ if isinstance(request, EmbeddingChatRequest):
114
+ (
115
+ _,
116
+ request_prompts,
117
+ engine_prompts,
118
+ ) = await self._preprocess_chat(
119
+ request,
120
+ tokenizer,
121
+ request.messages,
122
+ chat_template=request.chat_template or self.chat_template,
123
+ chat_template_content_format=self.
124
+ chat_template_content_format,
125
+ # In embedding requests, we are not generating tokens,
126
+ # so there is no need to append extra tokens to the input
127
+ add_generation_prompt=False,
128
+ continue_final_message=False,
129
+ truncate_prompt_tokens=truncate_prompt_tokens,
130
+ add_special_tokens=request.add_special_tokens,
131
+ )
132
+ else:
133
+ (request_prompts,
134
+ engine_prompts) = await self._preprocess_completion(
135
+ request,
136
+ tokenizer,
137
+ request.input,
138
+ truncate_prompt_tokens=truncate_prompt_tokens,
139
+ add_special_tokens=request.add_special_tokens,
140
+ )
141
+ except ValueError as e:
142
+ logger.exception("Error in preprocessing prompt inputs")
143
+ return self.create_error_response(str(e))
144
+
145
+ # Schedule the request and get the result generator.
146
+ generators: List[AsyncGenerator[PoolingRequestOutput, None]] = []
147
+ try:
148
+ pooling_params = request.to_pooling_params()
149
+
150
+ for i, engine_prompt in enumerate(engine_prompts):
151
+ request_id_item = f"{request_id}-{i}"
152
+
153
+ self._log_inputs(request_id_item,
154
+ request_prompts[i],
155
+ params=pooling_params,
156
+ lora_request=lora_request,
157
+ prompt_adapter_request=prompt_adapter_request)
158
+
159
+ trace_headers = (None if raw_request is None else await
160
+ self._get_trace_headers(raw_request.headers))
161
+
162
+ generator = self.engine_client.encode(
163
+ engine_prompt,
164
+ pooling_params,
165
+ request_id_item,
166
+ lora_request=lora_request,
167
+ trace_headers=trace_headers,
168
+ priority=request.priority,
169
+ )
170
+
171
+ generators.append(generator)
172
+ except ValueError as e:
173
+ # TODO: Use a vllm-specific Validation Error
174
+ return self.create_error_response(str(e))
175
+
176
+ result_generator = merge_async_iterators(*generators)
177
+
178
+ num_prompts = len(engine_prompts)
179
+
180
+ # Non-streaming response
181
+ final_res_batch: List[Optional[PoolingRequestOutput]]
182
+ final_res_batch = [None] * num_prompts
183
+ try:
184
+ async for i, res in result_generator:
185
+ final_res_batch[i] = res
186
+
187
+ assert all(final_res is not None for final_res in final_res_batch)
188
+
189
+ final_res_batch_checked = cast(List[PoolingRequestOutput],
190
+ final_res_batch)
191
+
192
+ response = self.request_output_to_embedding_response(
193
+ final_res_batch_checked,
194
+ request_id,
195
+ created_time,
196
+ model_name,
197
+ encoding_format,
198
+ )
199
+ except asyncio.CancelledError:
200
+ return self.create_error_response("Client disconnected")
201
+ except ValueError as e:
202
+ # TODO: Use a vllm-specific Validation Error
203
+ return self.create_error_response(str(e))
204
+
205
+ return response
206
+
207
+ def request_output_to_embedding_response(
208
+ self,
209
+ final_res_batch: List[PoolingRequestOutput],
210
+ request_id: str,
211
+ created_time: int,
212
+ model_name: str,
213
+ encoding_format: Literal["float", "base64"],
214
+ ) -> EmbeddingResponse:
215
+ items: List[EmbeddingResponseData] = []
216
+ num_prompt_tokens = 0
217
+
218
+ for idx, final_res in enumerate(final_res_batch):
219
+ embedding_res = EmbeddingRequestOutput.from_base(final_res)
220
+
221
+ item = EmbeddingResponseData(
222
+ index=idx,
223
+ embedding=_get_embedding(embedding_res.outputs,
224
+ encoding_format),
225
+ )
226
+ prompt_token_ids = final_res.prompt_token_ids
227
+
228
+ items.append(item)
229
+ num_prompt_tokens += len(prompt_token_ids)
230
+
231
+ usage = UsageInfo(
232
+ prompt_tokens=num_prompt_tokens,
233
+ total_tokens=num_prompt_tokens,
234
+ )
235
+
236
+ return EmbeddingResponse(
237
+ id=request_id,
238
+ created=created_time,
239
+ model=model_name,
240
+ data=items,
241
+ usage=usage,
242
+ )
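
The embedding handler above returns either a plain list of floats or, when encoding_format="base64", the float32 bytes of the vector encoded as base64 (matching the OpenAI Python client). The snippet below is a minimal, illustrative client-side decode of that payload; the response indexing in the final comment assumes the standard embeddings response shape.

# Sketch: invert _get_embedding's base64 path (float32 bytes, base64-encoded).
import base64
import numpy as np

def decode_base64_embedding(data: str) -> np.ndarray:
    # The server packs the vector with np.array(..., dtype="float32").tobytes(),
    # so decoding uses the same dtype; byte order follows the server's native
    # order (little-endian on common platforms).
    return np.frombuffer(base64.b64decode(data), dtype=np.float32)

# e.g. vec = decode_base64_embedding(response["data"][0]["embedding"])
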
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_engine.py ADDED
@@ -0,0 +1,527 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ import json
4
+ from concurrent.futures.thread import ThreadPoolExecutor
5
+ from http import HTTPStatus
6
+ from typing import (Any, Callable, Dict, Iterable, Iterator, List, Mapping,
7
+ Optional, Sequence, Tuple, TypedDict, Union)
8
+
9
+ from fastapi import Request
10
+ from pydantic import Field
11
+ from starlette.datastructures import Headers
12
+ from typing_extensions import Annotated
13
+
14
+ from vllm.config import ModelConfig
15
+ from vllm.engine.protocol import EngineClient
16
+ # yapf conflicts with isort for this block
17
+ # yapf: disable
18
+ from vllm.entrypoints.chat_utils import (ChatCompletionMessageParam,
19
+ ChatTemplateContentFormatOption,
20
+ ConversationMessage,
21
+ apply_hf_chat_template,
22
+ apply_mistral_chat_template,
23
+ parse_chat_messages_futures,
24
+ resolve_chat_template_content_format)
25
+ from vllm.entrypoints.logger import RequestLogger
26
+ from vllm.entrypoints.openai.protocol import (ChatCompletionRequest,
27
+ CompletionRequest,
28
+ DetokenizeRequest,
29
+ EmbeddingChatRequest,
30
+ EmbeddingCompletionRequest,
31
+ ErrorResponse, RerankRequest,
32
+ ScoreRequest,
33
+ TokenizeChatRequest,
34
+ TokenizeCompletionRequest,
35
+ TranscriptionRequest)
36
+ from vllm.entrypoints.openai.serving_models import OpenAIServingModels
37
+ from vllm.entrypoints.openai.tool_parsers import ToolParser
38
+ # yapf: enable
39
+ from vllm.inputs import TokensPrompt
40
+ from vllm.inputs.parse import parse_and_batch_prompt
41
+ from vllm.logger import init_logger
42
+ from vllm.lora.request import LoRARequest
43
+ from vllm.pooling_params import PoolingParams
44
+ from vllm.prompt_adapter.request import PromptAdapterRequest
45
+ from vllm.sampling_params import BeamSearchParams, SamplingParams
46
+ from vllm.sequence import Logprob
47
+ from vllm.tracing import (contains_trace_headers, extract_trace_headers,
48
+ log_tracing_disabled_warning)
49
+ from vllm.transformers_utils.tokenizer import AnyTokenizer, MistralTokenizer
50
+ from vllm.utils import is_list_of, make_async, random_uuid
51
+
52
+ logger = init_logger(__name__)
53
+
54
+ CompletionLikeRequest = Union[CompletionRequest, DetokenizeRequest,
55
+ EmbeddingCompletionRequest, ScoreRequest,
56
+ TokenizeCompletionRequest]
57
+
58
+ ChatLikeRequest = Union[ChatCompletionRequest, EmbeddingChatRequest,
59
+ TokenizeChatRequest]
60
+
61
+ AnyRequest = Union[CompletionLikeRequest, ChatLikeRequest,
62
+ TranscriptionRequest]
63
+
64
+
65
+ class TextTokensPrompt(TypedDict):
66
+ prompt: str
67
+ prompt_token_ids: List[int]
68
+
69
+
70
+ RequestPrompt = Union[List[int], str, TextTokensPrompt]
71
+
72
+
73
+ class OpenAIServing:
74
+
75
+ def __init__(
76
+ self,
77
+ engine_client: EngineClient,
78
+ model_config: ModelConfig,
79
+ models: OpenAIServingModels,
80
+ *,
81
+ request_logger: Optional[RequestLogger],
82
+ return_tokens_as_token_ids: bool = False,
83
+ ):
84
+ super().__init__()
85
+
86
+ self.engine_client = engine_client
87
+ self.model_config = model_config
88
+ self.max_model_len = model_config.max_model_len
89
+
90
+ self.models = models
91
+
92
+ self.request_logger = request_logger
93
+ self.return_tokens_as_token_ids = return_tokens_as_token_ids
94
+
95
+ self._tokenizer_executor = ThreadPoolExecutor(max_workers=1)
96
+
97
+ self._tokenize_prompt_input_async = make_async(
98
+ self._tokenize_prompt_input, executor=self._tokenizer_executor)
99
+ self._tokenize_prompt_input_or_inputs_async = make_async(
100
+ self._tokenize_prompt_input_or_inputs,
101
+ executor=self._tokenizer_executor)
102
+
103
+ def create_error_response(
104
+ self,
105
+ message: str,
106
+ err_type: str = "BadRequestError",
107
+ status_code: HTTPStatus = HTTPStatus.BAD_REQUEST) -> ErrorResponse:
108
+ return ErrorResponse(message=message,
109
+ type=err_type,
110
+ code=status_code.value)
111
+
112
+ def create_streaming_error_response(
113
+ self,
114
+ message: str,
115
+ err_type: str = "BadRequestError",
116
+ status_code: HTTPStatus = HTTPStatus.BAD_REQUEST) -> str:
117
+ json_str = json.dumps({
118
+ "error":
119
+ self.create_error_response(message=message,
120
+ err_type=err_type,
121
+ status_code=status_code).model_dump()
122
+ })
123
+ return json_str
124
+
125
+ async def _check_model(
126
+ self,
127
+ request: AnyRequest,
128
+ ) -> Optional[ErrorResponse]:
129
+ if self._is_model_supported(request.model):
130
+ return None
131
+ if request.model in [
132
+ lora.lora_name for lora in self.models.lora_requests
133
+ ]:
134
+ return None
135
+ if request.model in [
136
+ prompt_adapter.prompt_adapter_name
137
+ for prompt_adapter in self.models.prompt_adapter_requests
138
+ ]:
139
+ return None
140
+ return self.create_error_response(
141
+ message=f"The model `{request.model}` does not exist.",
142
+ err_type="NotFoundError",
143
+ status_code=HTTPStatus.NOT_FOUND)
144
+
145
+ def _maybe_get_adapters(
146
+ self, request: AnyRequest
147
+ ) -> Union[Tuple[None, None], Tuple[LoRARequest, None], Tuple[
148
+ None, PromptAdapterRequest]]:
149
+ if self._is_model_supported(request.model):
150
+ return None, None
151
+ for lora in self.models.lora_requests:
152
+ if request.model == lora.lora_name:
153
+ return lora, None
154
+ for prompt_adapter in self.models.prompt_adapter_requests:
155
+ if request.model == prompt_adapter.prompt_adapter_name:
156
+ return None, prompt_adapter
157
+ # if _check_model has been called earlier, this will be unreachable
158
+ raise ValueError(f"The model `{request.model}` does not exist.")
159
+
160
+ def _normalize_prompt_text_to_input(
161
+ self,
162
+ request: AnyRequest,
163
+ tokenizer: AnyTokenizer,
164
+ prompt: str,
165
+ truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]],
166
+ add_special_tokens: bool,
167
+ ) -> TextTokensPrompt:
168
+ if (self.model_config.encoder_config is not None
169
+ and self.model_config.encoder_config.get(
170
+ "do_lower_case", False)):
171
+ prompt = prompt.lower()
172
+
173
+ if truncate_prompt_tokens is None:
174
+ encoded = tokenizer(prompt, add_special_tokens=add_special_tokens)
175
+ else:
176
+ encoded = tokenizer(prompt,
177
+ add_special_tokens=add_special_tokens,
178
+ truncation=True,
179
+ max_length=truncate_prompt_tokens)
180
+
181
+ input_ids = encoded.input_ids
182
+
183
+ input_text = prompt
184
+
185
+ return self._validate_input(request, input_ids, input_text)
186
+
187
+ def _normalize_prompt_tokens_to_input(
188
+ self,
189
+ request: AnyRequest,
190
+ tokenizer: AnyTokenizer,
191
+ prompt_ids: List[int],
192
+ truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]],
193
+ ) -> TextTokensPrompt:
194
+ if truncate_prompt_tokens is None:
195
+ input_ids = prompt_ids
196
+ else:
197
+ input_ids = prompt_ids[-truncate_prompt_tokens:]
198
+
199
+ input_text = tokenizer.decode(input_ids)
200
+
201
+ return self._validate_input(request, input_ids, input_text)
202
+
203
+ def _validate_input(
204
+ self,
205
+ request: AnyRequest,
206
+ input_ids: List[int],
207
+ input_text: str,
208
+ ) -> TextTokensPrompt:
209
+ token_num = len(input_ids)
210
+
211
+ # Note: EmbeddingRequest and ScoreRequest don't have max_tokens
212
+ if isinstance(request,
213
+ (EmbeddingChatRequest, EmbeddingCompletionRequest,
214
+ ScoreRequest, RerankRequest)):
215
+
216
+ operation = "score" if isinstance(request, ScoreRequest) \
217
+ else "embedding generation"
218
+ if token_num > self.max_model_len:
219
+ raise ValueError(
220
+ f"This model's maximum context length is "
221
+ f"{self.max_model_len} tokens. However, you requested "
222
+ f"{token_num} tokens in the input for {operation}. "
223
+ f"Please reduce the length of the input.")
224
+ return TextTokensPrompt(prompt=input_text,
225
+ prompt_token_ids=input_ids)
226
+
227
+ # Note: TokenizeRequest and DetokenizeRequest don't have max_tokens
228
+ # and do not require model context length validation
229
+ if isinstance(request, (TokenizeCompletionRequest, TokenizeChatRequest,
230
+ DetokenizeRequest)):
231
+ return TextTokensPrompt(prompt=input_text,
232
+ prompt_token_ids=input_ids)
233
+
234
+ # chat completion endpoint supports max_completion_tokens
235
+ if isinstance(request, ChatCompletionRequest):
236
+ # TODO(#9845): remove max_tokens when field dropped from OpenAI API
237
+ max_tokens = request.max_completion_tokens or request.max_tokens
238
+ else:
239
+ max_tokens = request.max_tokens
240
+ if max_tokens is None:
241
+ if token_num >= self.max_model_len:
242
+ raise ValueError(
243
+ f"This model's maximum context length is "
244
+ f"{self.max_model_len} tokens. However, you requested "
245
+ f"{token_num} tokens in the messages, "
246
+ f"Please reduce the length of the messages.")
247
+ elif token_num + max_tokens > self.max_model_len:
248
+ raise ValueError(
249
+ f"This model's maximum context length is "
250
+ f"{self.max_model_len} tokens. However, you requested "
251
+ f"{max_tokens + token_num} tokens "
252
+ f"({token_num} in the messages, "
253
+ f"{max_tokens} in the completion). "
254
+ f"Please reduce the length of the messages or completion.")
255
+
256
+ return TextTokensPrompt(prompt=input_text, prompt_token_ids=input_ids)
257
+
258
+ def _tokenize_prompt_input(
259
+ self,
260
+ request: AnyRequest,
261
+ tokenizer: AnyTokenizer,
262
+ prompt_input: Union[str, List[int]],
263
+ truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None,
264
+ add_special_tokens: bool = True,
265
+ ) -> TextTokensPrompt:
266
+ """
267
+ A simpler implementation of :meth:`_tokenize_prompt_input_or_inputs`
268
+ that assumes single input.
269
+ """
270
+ return next(
271
+ self._tokenize_prompt_inputs(
272
+ request,
273
+ tokenizer,
274
+ [prompt_input],
275
+ truncate_prompt_tokens=truncate_prompt_tokens,
276
+ add_special_tokens=add_special_tokens,
277
+ ))
278
+
279
+ def _tokenize_prompt_inputs(
280
+ self,
281
+ request: AnyRequest,
282
+ tokenizer: AnyTokenizer,
283
+ prompt_inputs: Iterable[Union[str, List[int]]],
284
+ truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None,
285
+ add_special_tokens: bool = True,
286
+ ) -> Iterator[TextTokensPrompt]:
287
+ """
288
+ A simpler implementation of :meth:`_tokenize_prompt_input_or_inputs`
289
+ that assumes multiple inputs.
290
+ """
291
+ for text in prompt_inputs:
292
+ if isinstance(text, str):
293
+ yield self._normalize_prompt_text_to_input(
294
+ request,
295
+ tokenizer,
296
+ prompt=text,
297
+ truncate_prompt_tokens=truncate_prompt_tokens,
298
+ add_special_tokens=add_special_tokens,
299
+ )
300
+ else:
301
+ yield self._normalize_prompt_tokens_to_input(
302
+ request,
303
+ tokenizer,
304
+ prompt_ids=text,
305
+ truncate_prompt_tokens=truncate_prompt_tokens,
306
+ )
307
+
308
+ def _tokenize_prompt_input_or_inputs(
309
+ self,
310
+ request: AnyRequest,
311
+ tokenizer: AnyTokenizer,
312
+ input_or_inputs: Union[str, List[str], List[int], List[List[int]]],
313
+ truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None,
314
+ add_special_tokens: bool = True,
315
+ ) -> List[TextTokensPrompt]:
316
+ """
317
+ Tokenize/detokenize depending on the input format.
318
+
319
+ According to `OpenAI API <https://platform.openai.com/docs/api-reference/embeddings/create>`_
320
+ , each input can be a string or array of tokens. Note that each request
321
+ can pass one or more inputs.
322
+ """
323
+ # Although our type checking is based on mypy,
324
+ # VSCode Pyright extension should still work properly
325
+ # "is True" is required for Pyright to perform type narrowing
326
+ # See: https://github.com/microsoft/pyright/issues/7672
327
+ return [
328
+ self._normalize_prompt_text_to_input(
329
+ request,
330
+ tokenizer,
331
+ prompt=prompt_input["content"],
332
+ truncate_prompt_tokens=truncate_prompt_tokens,
333
+ add_special_tokens=add_special_tokens)
334
+ if prompt_input["is_tokens"] is False else
335
+ self._normalize_prompt_tokens_to_input(
336
+ request,
337
+ tokenizer,
338
+ prompt_ids=prompt_input["content"],
339
+ truncate_prompt_tokens=truncate_prompt_tokens)
340
+ for prompt_input in parse_and_batch_prompt(input_or_inputs)
341
+ ]
342
+
343
+ async def _preprocess_completion(
344
+ self,
345
+ request: CompletionLikeRequest,
346
+ tokenizer: AnyTokenizer,
347
+ input_or_inputs: Union[str, List[str], List[int], List[List[int]]],
348
+ truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None,
349
+ add_special_tokens: bool = True,
350
+ ) -> Tuple[List[TextTokensPrompt], List[TokensPrompt]]:
351
+ request_prompts = await self._tokenize_prompt_input_or_inputs_async(
352
+ request,
353
+ tokenizer,
354
+ input_or_inputs,
355
+ truncate_prompt_tokens=truncate_prompt_tokens,
356
+ add_special_tokens=add_special_tokens,
357
+ )
358
+
359
+ engine_prompts = [
360
+ TokensPrompt(prompt_token_ids=request_prompt["prompt_token_ids"])
361
+ for request_prompt in request_prompts
362
+ ]
363
+
364
+ return request_prompts, engine_prompts
365
+
366
+ async def _preprocess_chat(
367
+ self,
368
+ request: ChatLikeRequest,
369
+ tokenizer: AnyTokenizer,
370
+ messages: List[ChatCompletionMessageParam],
371
+ chat_template: Optional[str],
372
+ chat_template_content_format: ChatTemplateContentFormatOption,
373
+ add_generation_prompt: bool = True,
374
+ continue_final_message: bool = False,
375
+ tool_dicts: Optional[List[Dict[str, Any]]] = None,
376
+ documents: Optional[List[Dict[str, str]]] = None,
377
+ chat_template_kwargs: Optional[Dict[str, Any]] = None,
378
+ tool_parser: Optional[Callable[[AnyTokenizer], ToolParser]] = None,
379
+ truncate_prompt_tokens: Optional[Annotated[int, Field(ge=1)]] = None,
380
+ add_special_tokens: bool = False,
381
+ ) -> Tuple[List[ConversationMessage], Sequence[RequestPrompt],
382
+ List[TokensPrompt]]:
383
+ resolved_content_format = resolve_chat_template_content_format(
384
+ chat_template,
385
+ chat_template_content_format,
386
+ tokenizer,
387
+ )
388
+ conversation, mm_data_future = parse_chat_messages_futures(
389
+ messages,
390
+ self.model_config,
391
+ tokenizer,
392
+ content_format=resolved_content_format,
393
+ )
394
+
395
+ _chat_template_kwargs: Dict[str, Any] = dict(
396
+ chat_template=chat_template,
397
+ add_generation_prompt=add_generation_prompt,
398
+ continue_final_message=continue_final_message,
399
+ tools=tool_dicts,
400
+ documents=documents,
401
+ )
402
+ _chat_template_kwargs.update(chat_template_kwargs or {})
403
+
404
+ request_prompt: Union[str, List[int]]
405
+ if isinstance(tokenizer, MistralTokenizer):
406
+ request_prompt = apply_mistral_chat_template(
407
+ tokenizer,
408
+ messages=messages,
409
+ **_chat_template_kwargs,
410
+ )
411
+ else:
412
+ request_prompt = apply_hf_chat_template(
413
+ tokenizer,
414
+ conversation=conversation,
415
+ **_chat_template_kwargs,
416
+ )
417
+
418
+ mm_data = await mm_data_future
419
+
420
+ # tool parsing is done only if a tool_parser has been set and if
421
+ # tool_choice is not "none" (if tool_choice is "none" but a tool_parser
422
+ # is set, we want to prevent parsing a tool_call hallucinated by the LLM)
423
+ should_parse_tools = tool_parser is not None and (hasattr(
424
+ request, "tool_choice") and request.tool_choice != "none")
425
+
426
+ if should_parse_tools:
427
+ if not isinstance(request, ChatCompletionRequest):
428
+ msg = "Tool usage is only supported for Chat Completions API"
429
+ raise NotImplementedError(msg)
430
+
431
+ request = tool_parser(tokenizer).adjust_request( # type: ignore
432
+ request=request)
433
+
434
+ if isinstance(request_prompt, str):
435
+ prompt_inputs = await self._tokenize_prompt_input_async(
436
+ request,
437
+ tokenizer,
438
+ request_prompt,
439
+ truncate_prompt_tokens=truncate_prompt_tokens,
440
+ add_special_tokens=add_special_tokens,
441
+ )
442
+ else:
443
+ # For MistralTokenizer
444
+ assert is_list_of(request_prompt, int), (
445
+ "Prompt has to be either a string or a list of token ids")
446
+ prompt_inputs = TextTokensPrompt(
447
+ prompt=tokenizer.decode(request_prompt),
448
+ prompt_token_ids=request_prompt)
449
+
450
+ engine_prompt = TokensPrompt(
451
+ prompt_token_ids=prompt_inputs["prompt_token_ids"])
452
+ if mm_data is not None:
453
+ engine_prompt["multi_modal_data"] = mm_data
454
+ if request.mm_processor_kwargs is not None:
455
+ engine_prompt["mm_processor_kwargs"] = request.mm_processor_kwargs
456
+
457
+ return conversation, [request_prompt], [engine_prompt]
458
+
459
+ def _log_inputs(
460
+ self,
461
+ request_id: str,
462
+ inputs: RequestPrompt,
463
+ params: Optional[Union[SamplingParams, PoolingParams,
464
+ BeamSearchParams]],
465
+ lora_request: Optional[LoRARequest],
466
+ prompt_adapter_request: Optional[PromptAdapterRequest],
467
+ ) -> None:
468
+ if self.request_logger is None:
469
+ return
470
+
471
+ if isinstance(inputs, str):
472
+ prompt = inputs
473
+ prompt_token_ids = None
474
+ elif isinstance(inputs, list):
475
+ prompt = None
476
+ prompt_token_ids = inputs
477
+ else:
478
+ prompt = inputs["prompt"]
479
+ prompt_token_ids = inputs["prompt_token_ids"]
480
+
481
+ self.request_logger.log_inputs(
482
+ request_id,
483
+ prompt,
484
+ prompt_token_ids,
485
+ params=params,
486
+ lora_request=lora_request,
487
+ prompt_adapter_request=prompt_adapter_request,
488
+ )
489
+
490
+ async def _get_trace_headers(
491
+ self,
492
+ headers: Headers,
493
+ ) -> Optional[Mapping[str, str]]:
494
+ is_tracing_enabled = await self.engine_client.is_tracing_enabled()
495
+
496
+ if is_tracing_enabled:
497
+ return extract_trace_headers(headers)
498
+
499
+ if contains_trace_headers(headers):
500
+ log_tracing_disabled_warning()
501
+
502
+ return None
503
+
504
+ @staticmethod
505
+ def _base_request_id(raw_request: Optional[Request],
506
+ default: Optional[str] = None) -> Optional[str]:
507
+ """Pulls the request id to use from a header, if provided"""
508
+ default = default or random_uuid()
509
+ if raw_request is None:
510
+ return default
511
+
512
+ return raw_request.headers.get("X-Request-Id", default)
513
+
514
+ @staticmethod
515
+ def _get_decoded_token(logprob: Logprob,
516
+ token_id: int,
517
+ tokenizer: AnyTokenizer,
518
+ return_as_token_id: bool = False) -> str:
519
+ if return_as_token_id:
520
+ return f"token_id:{token_id}"
521
+
522
+ if logprob.decoded_token is not None:
523
+ return logprob.decoded_token
524
+ return tokenizer.decode(token_id)
525
+
526
+ def _is_model_supported(self, model_name):
527
+ return self.models.is_base_model(model_name)
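
The core of OpenAIServing._validate_input above is a token-budget check: for generation requests, the tokenized prompt plus the requested completion must fit within the model's context window. The standalone function below is an illustrative sketch of that arithmetic, not part of the diff.

# Sketch of the context-length budget enforced by _validate_input.
from typing import Optional

def check_token_budget(prompt_tokens: int, max_tokens: Optional[int],
                       max_model_len: int) -> None:
    if max_tokens is None:
        # Without an explicit completion budget, the prompt alone must leave
        # at least one token of headroom.
        if prompt_tokens >= max_model_len:
            raise ValueError(
                f"Prompt uses {prompt_tokens} tokens, but the model's "
                f"context length is only {max_model_len} tokens.")
    elif prompt_tokens + max_tokens > max_model_len:
        raise ValueError(
            f"{prompt_tokens} prompt + {max_tokens} completion tokens "
            f"exceed the {max_model_len}-token context length.")

# For example, a 4096-token model with a 4000-token prompt admits at most
# 96 completion tokens before this check raises.
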
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_models.py ADDED
@@ -0,0 +1,244 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ import json
4
+ import pathlib
5
+ from dataclasses import dataclass
6
+ from http import HTTPStatus
7
+ from typing import List, Optional, Union
8
+
9
+ from vllm.config import ModelConfig
10
+ from vllm.engine.protocol import EngineClient
11
+ from vllm.entrypoints.openai.protocol import (ErrorResponse,
12
+ LoadLoraAdapterRequest,
13
+ ModelCard, ModelList,
14
+ ModelPermission,
15
+ UnloadLoraAdapterRequest)
16
+ from vllm.logger import init_logger
17
+ from vllm.lora.request import LoRARequest
18
+ from vllm.prompt_adapter.request import PromptAdapterRequest
19
+ from vllm.utils import AtomicCounter
20
+
21
+ logger = init_logger(__name__)
22
+
23
+
24
+ @dataclass
25
+ class BaseModelPath:
26
+ name: str
27
+ model_path: str
28
+
29
+
30
+ @dataclass
31
+ class PromptAdapterPath:
32
+ name: str
33
+ local_path: str
34
+
35
+
36
+ @dataclass
37
+ class LoRAModulePath:
38
+ name: str
39
+ path: str
40
+ base_model_name: Optional[str] = None
41
+
42
+
43
+ class OpenAIServingModels:
44
+ """Shared instance to hold data about the loaded base model(s) and adapters.
45
+
46
+ Handles the routes:
47
+ - /v1/models
48
+ - /v1/load_lora_adapter
49
+ - /v1/unload_lora_adapter
50
+ """
51
+
52
+ def __init__(
53
+ self,
54
+ engine_client: EngineClient,
55
+ model_config: ModelConfig,
56
+ base_model_paths: List[BaseModelPath],
57
+ *,
58
+ lora_modules: Optional[List[LoRAModulePath]] = None,
59
+ prompt_adapters: Optional[List[PromptAdapterPath]] = None,
60
+ ):
61
+ super().__init__()
62
+
63
+ self.base_model_paths = base_model_paths
64
+ self.max_model_len = model_config.max_model_len
65
+ self.engine_client = engine_client
66
+
67
+ self.static_lora_modules = lora_modules
68
+ self.lora_requests: List[LoRARequest] = []
69
+ self.lora_id_counter = AtomicCounter(0)
70
+
71
+ self.prompt_adapter_requests = []
72
+ if prompt_adapters is not None:
73
+ for i, prompt_adapter in enumerate(prompt_adapters, start=1):
74
+ with pathlib.Path(prompt_adapter.local_path,
75
+ "adapter_config.json").open() as f:
76
+ adapter_config = json.load(f)
77
+ num_virtual_tokens = adapter_config["num_virtual_tokens"]
78
+ self.prompt_adapter_requests.append(
79
+ PromptAdapterRequest(
80
+ prompt_adapter_name=prompt_adapter.name,
81
+ prompt_adapter_id=i,
82
+ prompt_adapter_local_path=prompt_adapter.local_path,
83
+ prompt_adapter_num_virtual_tokens=num_virtual_tokens))
84
+
85
+ async def init_static_loras(self):
86
+ """Loads all static LoRA modules.
87
+ Raises if any fail to load"""
88
+ if self.static_lora_modules is None:
89
+ return
90
+ for lora in self.static_lora_modules:
91
+ load_request = LoadLoraAdapterRequest(lora_path=lora.path,
92
+ lora_name=lora.name)
93
+ load_result = await self.load_lora_adapter(
94
+ request=load_request, base_model_name=lora.base_model_name)
95
+ if isinstance(load_result, ErrorResponse):
96
+ raise ValueError(load_result.message)
97
+
98
+ def is_base_model(self, model_name):
99
+ return any(model.name == model_name for model in self.base_model_paths)
100
+
101
+ def model_name(self, lora_request: Optional[LoRARequest] = None) -> str:
102
+ """Returns the appropriate model name depending on the availability
103
+ and support of the LoRA or base model.
104
+ Parameters:
105
+ - lora: LoRARequest that contain a base_model_name.
106
+ Returns:
107
+ - str: The name of the base model or the first available model path.
108
+ """
109
+ if lora_request is not None:
110
+ return lora_request.lora_name
111
+ return self.base_model_paths[0].name
112
+
113
+ async def show_available_models(self) -> ModelList:
114
+ """Show available models. This includes the base model and all
115
+ adapters"""
116
+ model_cards = [
117
+ ModelCard(id=base_model.name,
118
+ max_model_len=self.max_model_len,
119
+ root=base_model.model_path,
120
+ permission=[ModelPermission()])
121
+ for base_model in self.base_model_paths
122
+ ]
123
+ lora_cards = [
124
+ ModelCard(id=lora.lora_name,
125
+ root=lora.local_path,
126
+ parent=lora.base_model_name if lora.base_model_name else
127
+ self.base_model_paths[0].name,
128
+ permission=[ModelPermission()])
129
+ for lora in self.lora_requests
130
+ ]
131
+ prompt_adapter_cards = [
132
+ ModelCard(id=prompt_adapter.prompt_adapter_name,
133
+ root=self.base_model_paths[0].name,
134
+ permission=[ModelPermission()])
135
+ for prompt_adapter in self.prompt_adapter_requests
136
+ ]
137
+ model_cards.extend(lora_cards)
138
+ model_cards.extend(prompt_adapter_cards)
139
+ return ModelList(data=model_cards)
140
+
141
+ async def load_lora_adapter(
142
+ self,
143
+ request: LoadLoraAdapterRequest,
144
+ base_model_name: Optional[str] = None
145
+ ) -> Union[ErrorResponse, str]:
146
+ error_check_ret = await self._check_load_lora_adapter_request(request)
147
+ if error_check_ret is not None:
148
+ return error_check_ret
149
+
150
+ lora_name, lora_path = request.lora_name, request.lora_path
151
+ unique_id = self.lora_id_counter.inc(1)
152
+ lora_request = LoRARequest(lora_name=lora_name,
153
+ lora_int_id=unique_id,
154
+ lora_path=lora_path)
155
+ if base_model_name is not None and self.is_base_model(base_model_name):
156
+ lora_request.base_model_name = base_model_name
157
+
158
+ # Validate that the adapter can be loaded into the engine
159
+ # This will also pre-load it for incoming requests
160
+ try:
161
+ await self.engine_client.add_lora(lora_request)
162
+ except BaseException as e:
163
+ error_type = "BadRequestError"
164
+ status_code = HTTPStatus.BAD_REQUEST
165
+ if isinstance(e, ValueError) and "No adapter found" in str(e):
166
+ error_type = "NotFoundError"
167
+ status_code = HTTPStatus.NOT_FOUND
168
+
169
+ return create_error_response(message=str(e),
170
+ err_type=error_type,
171
+ status_code=status_code)
172
+
173
+ self.lora_requests.append(lora_request)
174
+ logger.info("Loaded new LoRA adapter: name '%s', path '%s'", lora_name,
175
+ lora_path)
176
+ return f"Success: LoRA adapter '{lora_name}' added successfully."
177
+
178
+ async def unload_lora_adapter(
179
+ self,
180
+ request: UnloadLoraAdapterRequest) -> Union[ErrorResponse, str]:
181
+ error_check_ret = await self._check_unload_lora_adapter_request(request
182
+ )
183
+ if error_check_ret is not None:
184
+ return error_check_ret
185
+
186
+ lora_name = request.lora_name
187
+ self.lora_requests = [
188
+ lora_request for lora_request in self.lora_requests
189
+ if lora_request.lora_name != lora_name
190
+ ]
191
+ logger.info("Removed LoRA adapter: name '%s'", lora_name)
192
+ return f"Success: LoRA adapter '{lora_name}' removed successfully."
193
+
194
+ async def _check_load_lora_adapter_request(
195
+ self, request: LoadLoraAdapterRequest) -> Optional[ErrorResponse]:
196
+ # Check if both 'lora_name' and 'lora_path' are provided
197
+ if not request.lora_name or not request.lora_path:
198
+ return create_error_response(
199
+ message="Both 'lora_name' and 'lora_path' must be provided.",
200
+ err_type="InvalidUserInput",
201
+ status_code=HTTPStatus.BAD_REQUEST)
202
+
203
+ # Check if the lora adapter with the given name already exists
204
+ if any(lora_request.lora_name == request.lora_name
205
+ for lora_request in self.lora_requests):
206
+ return create_error_response(
207
+ message=
208
+ f"The lora adapter '{request.lora_name}' has already been "
209
+ "loaded.",
210
+ err_type="InvalidUserInput",
211
+ status_code=HTTPStatus.BAD_REQUEST)
212
+
213
+ return None
214
+
215
+ async def _check_unload_lora_adapter_request(
216
+ self,
217
+ request: UnloadLoraAdapterRequest) -> Optional[ErrorResponse]:
218
+ # Check if either 'lora_name' or 'lora_int_id' is provided
219
+ if not request.lora_name and not request.lora_int_id:
220
+ return create_error_response(
221
+ message=
222
+ "either 'lora_name' and 'lora_int_id' needs to be provided.",
223
+ err_type="InvalidUserInput",
224
+ status_code=HTTPStatus.BAD_REQUEST)
225
+
226
+ # Check if the lora adapter with the given name exists
227
+ if not any(lora_request.lora_name == request.lora_name
228
+ for lora_request in self.lora_requests):
229
+ return create_error_response(
230
+ message=
231
+ f"The lora adapter '{request.lora_name}' cannot be found.",
232
+ err_type="NotFoundError",
233
+ status_code=HTTPStatus.NOT_FOUND)
234
+
235
+ return None
236
+
237
+
238
+ def create_error_response(
239
+ message: str,
240
+ err_type: str = "BadRequestError",
241
+ status_code: HTTPStatus = HTTPStatus.BAD_REQUEST) -> ErrorResponse:
242
+ return ErrorResponse(message=message,
243
+ type=err_type,
244
+ code=status_code.value)
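
OpenAIServingModels above also backs the dynamic adapter routes listed in its docstring. The sketch below shows, purely for illustration, how a client could drive /v1/load_lora_adapter; the base URL and adapter path are assumptions, the payload fields mirror LoadLoraAdapterRequest, and the deployment must allow runtime LoRA loading for the call to succeed.

# Hypothetical client call against the /v1/load_lora_adapter route handled by
# OpenAIServingModels.load_lora_adapter.
import requests

resp = requests.post(
    "http://localhost:8000/v1/load_lora_adapter",  # assumed server address
    json={
        "lora_name": "my-adapter",        # hypothetical adapter name
        "lora_path": "/path/to/adapter",  # hypothetical local adapter path
    },
)
print(resp.status_code, resp.text)

# Unloading follows the same shape: POST /v1/unload_lora_adapter with
# {"lora_name": "my-adapter"}.
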
infer_4_30_0/lib/python3.10/site-packages/vllm/entrypoints/openai/serving_pooling.py ADDED
@@ -0,0 +1,235 @@
1
+ # SPDX-License-Identifier: Apache-2.0
2
+
3
+ import asyncio
4
+ import base64
5
+ import time
6
+ from typing import AsyncGenerator, Final, List, Literal, Optional, Union, cast
7
+
8
+ import numpy as np
9
+ from fastapi import Request
10
+ from typing_extensions import assert_never
11
+
12
+ from vllm.config import ModelConfig
13
+ from vllm.engine.protocol import EngineClient
14
+ from vllm.entrypoints.chat_utils import ChatTemplateContentFormatOption
15
+ from vllm.entrypoints.logger import RequestLogger
16
+ from vllm.entrypoints.openai.protocol import (ErrorResponse,
17
+ PoolingChatRequest,
18
+ PoolingRequest, PoolingResponse,
19
+ PoolingResponseData, UsageInfo)
20
+ from vllm.entrypoints.openai.serving_engine import OpenAIServing
21
+ from vllm.entrypoints.openai.serving_models import OpenAIServingModels
22
+ from vllm.logger import init_logger
23
+ from vllm.outputs import PoolingOutput, PoolingRequestOutput
24
+ from vllm.utils import merge_async_iterators
25
+
26
+ logger = init_logger(__name__)
27
+
28
+
29
+ def _get_data(
30
+ output: PoolingOutput,
31
+ encoding_format: Literal["float", "base64"],
32
+ ) -> Union[List[float], str]:
33
+ if encoding_format == "float":
34
+ return output.data.tolist()
35
+ elif encoding_format == "base64":
36
+ # Force to use float32 for base64 encoding
37
+ # to match the OpenAI python client behavior
38
+ pooling_bytes = np.array(output.data, dtype="float32").tobytes()
39
+ return base64.b64encode(pooling_bytes).decode("utf-8")
40
+
41
+ assert_never(encoding_format)
42
+
43
+
44
+ class OpenAIServingPooling(OpenAIServing):
45
+
46
+ def __init__(
47
+ self,
48
+ engine_client: EngineClient,
49
+ model_config: ModelConfig,
50
+ models: OpenAIServingModels,
51
+ *,
52
+ request_logger: Optional[RequestLogger],
53
+ chat_template: Optional[str],
54
+ chat_template_content_format: ChatTemplateContentFormatOption,
55
+ ) -> None:
56
+ super().__init__(engine_client=engine_client,
57
+ model_config=model_config,
58
+ models=models,
59
+ request_logger=request_logger)
60
+
61
+ self.chat_template = chat_template
62
+ self.chat_template_content_format: Final = chat_template_content_format
63
+
64
+ async def create_pooling(
65
+ self,
66
+ request: PoolingRequest,
67
+ raw_request: Optional[Request] = None,
68
+ ) -> Union[PoolingResponse, ErrorResponse]:
69
+ """
70
+ See https://platform.openai.com/docs/api-reference/embeddings/create
71
+ for the API specification. This API mimics the OpenAI Embedding API.
72
+ """
73
+ error_check_ret = await self._check_model(request)
74
+ if error_check_ret is not None:
75
+ return error_check_ret
76
+
77
+ encoding_format = request.encoding_format
78
+ if request.dimensions is not None:
79
+ return self.create_error_response(
80
+ "dimensions is currently not supported")
81
+
82
+ model_name = request.model
83
+ request_id = f"pool-{self._base_request_id(raw_request)}"
84
+ created_time = int(time.time())
85
+
86
+ truncate_prompt_tokens = None
87
+
88
+ if request.truncate_prompt_tokens is not None:
89
+ if request.truncate_prompt_tokens <= self.max_model_len:
90
+ truncate_prompt_tokens = request.truncate_prompt_tokens
91
+ else:
92
+ return self.create_error_response(
93
+ "truncate_prompt_tokens value is "
94
+ "greater than max_model_len."
95
+ " Please, select a smaller truncation size.")
96
+
97
+ try:
98
+ (
99
+ lora_request,
100
+ prompt_adapter_request,
101
+ ) = self._maybe_get_adapters(request)
102
+
103
+ tokenizer = await self.engine_client.get_tokenizer(lora_request)
104
+
105
+ if prompt_adapter_request is not None:
106
+ raise NotImplementedError("Prompt adapter is not supported "
107
+ "for pooling models")
108
+
109
+ if isinstance(request, PoolingChatRequest):
110
+ (
111
+ _,
112
+ request_prompts,
113
+ engine_prompts,
114
+ ) = await self._preprocess_chat(
115
+ request,
116
+ tokenizer,
117
+ request.messages,
118
+ chat_template=request.chat_template or self.chat_template,
119
+ chat_template_content_format=self.
120
+ chat_template_content_format,
121
+ # In pooling requests, we are not generating tokens,
122
+ # so there is no need to append extra tokens to the input
123
+ add_generation_prompt=False,
124
+ continue_final_message=False,
125
+ truncate_prompt_tokens=truncate_prompt_tokens,
126
+ add_special_tokens=request.add_special_tokens,
127
+ )
128
+ else:
129
+ (request_prompts,
130
+ engine_prompts) = await self._preprocess_completion(
131
+ request,
132
+ tokenizer,
133
+ request.input,
134
+ truncate_prompt_tokens=truncate_prompt_tokens,
135
+ add_special_tokens=request.add_special_tokens,
136
+ )
137
+ except ValueError as e:
138
+ logger.exception("Error in preprocessing prompt inputs")
139
+ return self.create_error_response(str(e))
140
+
141
+ # Schedule the request and get the result generator.
142
+ generators: List[AsyncGenerator[PoolingRequestOutput, None]] = []
143
+ try:
144
+ pooling_params = request.to_pooling_params()
145
+
146
+ for i, engine_prompt in enumerate(engine_prompts):
147
+ request_id_item = f"{request_id}-{i}"
148
+
149
+ self._log_inputs(request_id_item,
150
+ request_prompts[i],
151
+ params=pooling_params,
152
+ lora_request=lora_request,
153
+ prompt_adapter_request=prompt_adapter_request)
154
+
155
+ trace_headers = (None if raw_request is None else await
156
+ self._get_trace_headers(raw_request.headers))
157
+
158
+ generator = self.engine_client.encode(
159
+ engine_prompt,
160
+ pooling_params,
161
+ request_id_item,
162
+ lora_request=lora_request,
163
+ trace_headers=trace_headers,
164
+ priority=request.priority,
165
+ )
166
+
167
+ generators.append(generator)
168
+ except ValueError as e:
169
+ # TODO: Use a vllm-specific Validation Error
170
+ return self.create_error_response(str(e))
171
+
172
+ result_generator = merge_async_iterators(*generators)
173
+
174
+ num_prompts = len(engine_prompts)
175
+
176
+ # Non-streaming response
177
+ final_res_batch: List[Optional[PoolingRequestOutput]]
178
+ final_res_batch = [None] * num_prompts
179
+ try:
180
+ async for i, res in result_generator:
181
+ final_res_batch[i] = res
182
+
183
+ assert all(final_res is not None for final_res in final_res_batch)
184
+
185
+ final_res_batch_checked = cast(List[PoolingRequestOutput],
186
+ final_res_batch)
187
+
188
+ response = self.request_output_to_pooling_response(
189
+ final_res_batch_checked,
190
+ request_id,
191
+ created_time,
192
+ model_name,
193
+ encoding_format,
194
+ )
195
+ except asyncio.CancelledError:
196
+ return self.create_error_response("Client disconnected")
197
+ except ValueError as e:
198
+ # TODO: Use a vllm-specific Validation Error
199
+ return self.create_error_response(str(e))
200
+
201
+ return response
202
+
203
+ def request_output_to_pooling_response(
204
+ self,
205
+ final_res_batch: List[PoolingRequestOutput],
206
+ request_id: str,
207
+ created_time: int,
208
+ model_name: str,
209
+ encoding_format: Literal["float", "base64"],
210
+ ) -> PoolingResponse:
211
+ items: List[PoolingResponseData] = []
212
+ num_prompt_tokens = 0
213
+
214
+ for idx, final_res in enumerate(final_res_batch):
215
+ item = PoolingResponseData(
216
+ index=idx,
217
+ data=_get_data(final_res.outputs, encoding_format),
218
+ )
219
+ prompt_token_ids = final_res.prompt_token_ids
220
+
221
+ items.append(item)
222
+ num_prompt_tokens += len(prompt_token_ids)
223
+
224
+ usage = UsageInfo(
225
+ prompt_tokens=num_prompt_tokens,
226
+ total_tokens=num_prompt_tokens,
227
+ )
228
+
229
+ return PoolingResponse(
230
+ id=request_id,
231
+ created=created_time,
232
+ model=model_name,
233
+ data=items,
234
+ usage=usage,
235
+ )
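
The pooling and embedding handlers above share a fan-out/gather pattern: one encode() generator is created per prompt, the generators are merged into a single stream of (prompt_index, result) pairs, and each result is slotted back into final_res_batch by index. The toy script below illustrates that pattern; merge_indexed is a simplified stand-in for vllm.utils.merge_async_iterators, not its actual implementation.

# Simplified sketch of merging per-prompt async generators and gathering
# results back by prompt index.
import asyncio
from typing import AsyncIterator, List, Optional, Tuple

async def merge_indexed(
        *gens: AsyncIterator[str]) -> AsyncIterator[Tuple[int, str]]:
    queue: asyncio.Queue = asyncio.Queue()
    done = object()

    async def pump(i: int, gen: AsyncIterator[str]) -> None:
        async for item in gen:
            await queue.put((i, item))
        await queue.put(done)

    # Keep task references so the pumps run to completion.
    tasks = [asyncio.create_task(pump(i, g)) for i, g in enumerate(gens)]
    remaining = len(tasks)
    while remaining:
        item = await queue.get()
        if item is done:
            remaining -= 1
        else:
            yield item

async def fake_encode(i: int) -> AsyncIterator[str]:
    await asyncio.sleep(0.01 * i)   # results may finish out of order
    yield f"pooled-output-{i}"

async def main() -> None:
    num_prompts = 3
    final_res_batch: List[Optional[str]] = [None] * num_prompts
    async for idx, res in merge_indexed(
            *(fake_encode(i) for i in range(num_prompts))):
        final_res_batch[idx] = res   # gather by original prompt index
    print(final_res_batch)

asyncio.run(main())
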