Commit c296b25

[Refactor][MISC] del redundant code in ParallelConfig.__post_init__ (vllm-project#10614)

Signed-off-by: MengqingCao <[email protected]>
Signed-off-by: Maxime Fournioux <[email protected]>
MengqingCao authored and mfournioux committed Nov 28, 2024
1 parent 42adcc8 commit c296b25
Showing 1 changed file with 5 additions and 10 deletions.
vllm/config.py: 15 changes (5 additions & 10 deletions)
@@ -998,20 +998,15 @@ def __post_init__(self) -> None:
                 raise ValueError(f"worker-use-ray can't be used with "
                                  f"distributed executor backend "
                                  f"'{self.distributed_executor_backend}'.")
-
-        if current_platform.is_tpu() and self.world_size > 1:
-            if self.distributed_executor_backend is None:
-                self.distributed_executor_backend = "ray"
-            if self.distributed_executor_backend != "ray":
-                raise ValueError(
-                    "TPU backend only supports Ray for distributed inference.")
-
-        if current_platform.is_hpu() and self.world_size > 1:
+        ray_only_devices = ["tpu", "hpu"]
+        if (current_platform.device_type in ray_only_devices
+                and self.world_size > 1):
             if self.distributed_executor_backend is None:
                 self.distributed_executor_backend = "ray"
             if self.distributed_executor_backend != "ray":
                 raise ValueError(
-                    "HPU backend only supports Ray for distributed inference.")
+                    f"{current_platform.device_type.upper()} backend only "
+                    "supports Ray for distributed inference.")
 
         if self.distributed_executor_backend is None and self.world_size > 1:
             # We use multiprocessing by default if world_size fits on the

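The refactor replaces two near-identical per-platform branches (is_tpu() / is_hpu()) with a single membership test on current_platform.device_type, so supporting another Ray-only device later means extending a list rather than copying a block. A minimal, self-contained sketch of the consolidated logic follows; the Platform dataclass and resolve_backend helper are hypothetical stand-ins for illustration, not vLLM APIs.

from dataclasses import dataclass
from typing import Optional

# Devices whose distributed inference is Ray-only, mirroring the diff above.
RAY_ONLY_DEVICES = ["tpu", "hpu"]


@dataclass
class Platform:
    """Illustrative stand-in for vllm.platforms.current_platform."""
    device_type: str  # e.g. "cuda", "tpu", "hpu"


def resolve_backend(platform: Platform, world_size: int,
                    backend: Optional[str]) -> Optional[str]:
    # The consolidated check: default to Ray on Ray-only devices and
    # reject any other explicitly requested backend.
    if platform.device_type in RAY_ONLY_DEVICES and world_size > 1:
        if backend is None:
            backend = "ray"
        if backend != "ray":
            raise ValueError(
                f"{platform.device_type.upper()} backend only "
                "supports Ray for distributed inference.")
    return backend


# A TPU run with world_size > 1 and no explicit backend defaults to "ray";
# requesting e.g. "mp" on the same platform raises ValueError instead.
assert resolve_backend(Platform("tpu"), 4, None) == "ray"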