Type-fix: make execute_model output type optional (vllm-project#12020)
youngkent authored and jikunshang committed Jan 21, 2025
1 parent f999f68 commit 8742287
Showing 2 changed files with 2 additions and 1 deletion.
vllm/v1/executor/uniproc_executor.py (1 addition, 0 deletions)
@@ -70,6 +70,7 @@ def execute_model(
         scheduler_output,
     ) -> ModelRunnerOutput:
         output = self.worker.execute_model(scheduler_output)
+        assert output is not None
         return output
 
     def profile(self, is_start: bool = True):
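For context on this hunk: the worker's execute_model is now typed Optional[ModelRunnerOutput] (see the second file below), so the single-process executor asserts the result is not None to narrow the type back to ModelRunnerOutput. This is safe at runtime because the uniproc executor's lone worker is rank 0, which always returns output. A minimal standalone sketch of the narrowing pattern, using hypothetical stand-in functions rather than vLLM's actual classes:

```python
from typing import Optional


class ModelRunnerOutput:
    """Illustrative stand-in for vLLM's ModelRunnerOutput."""


def worker_execute_model() -> Optional[ModelRunnerOutput]:
    # Typed Optional because non-zero ranks return None;
    # this sketch plays the rank-0 case.
    return ModelRunnerOutput()


def executor_execute_model() -> ModelRunnerOutput:
    output = worker_execute_model()
    # The assert narrows Optional[ModelRunnerOutput] to ModelRunnerOutput
    # for the type checker (e.g. mypy); in single-process execution the
    # lone worker is rank 0, so this never fires at runtime.
    assert output is not None
    return output


print(isinstance(executor_execute_model(), ModelRunnerOutput))  # True
```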
vllm/v1/worker/gpu_worker.py (1 addition, 1 deletion)
@@ -200,7 +200,7 @@ def compile_or_warm_up_model(self) -> None:
     def execute_model(
         self,
         scheduler_output: "SchedulerOutput",
-    ) -> ModelRunnerOutput:
+    ) -> Optional[ModelRunnerOutput]:
         output = self.model_runner.execute_model(scheduler_output)
         return output if self.rank == 0 else None
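This return-type change is the substantive fix: the body already returned None on non-zero ranks, so the previous ModelRunnerOutput annotation was inaccurate, presumably because in multi-GPU runs only rank 0 (the driver) hands results back to the executor. A runnable sketch of that rank-gated pattern, with a hypothetical Worker class standing in for the real one:

```python
from typing import Optional


class ModelRunnerOutput:
    """Illustrative stand-in for vLLM's ModelRunnerOutput."""


class Worker:
    """Hypothetical worker mirroring the rank-gated return in gpu_worker.py."""

    def __init__(self, rank: int) -> None:
        self.rank = rank

    def _run_model(self) -> ModelRunnerOutput:
        # Every rank executes the model...
        return ModelRunnerOutput()

    def execute_model(self) -> Optional[ModelRunnerOutput]:
        output = self._run_model()
        # ...but only rank 0 returns results to the caller, so the
        # honest return type is Optional[ModelRunnerOutput].
        return output if self.rank == 0 else None


print(Worker(rank=0).execute_model() is not None)  # True
print(Worker(rank=1).execute_model())              # None
```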
