Commit 2b6b220 (parent: ce580f3)
Showing 2 changed files with 57 additions and 4 deletions.
@@ -0,0 +1,16 @@
```python
#encoding: utf-8

import torch.cuda.comm as comm

from utils.base import nccl_type_map

def secure_broadcast_coalesced(tensors, devices, buffer_size=10485760):

	if nccl_type_map is None:
		# Every input dtype is supported by the NCCL backend; broadcast as-is.
		return comm.broadcast_coalesced(tensors, devices, buffer_size=buffer_size)
	else:
		# Record each tensor's original dtype and, where needed, the
		# NCCL-supported surrogate dtype it must be cast to (None = no cast).
		src_type = [para.dtype for para in tensors]
		map_type = [nccl_type_map[para.dtype] if para.dtype in nccl_type_map else None for para in tensors]
		# Broadcast, with unsupported tensors cast to their surrogate dtypes.
		rs = comm.broadcast_coalesced([para if typ is None else para.to(typ) for para, typ in zip(tensors, map_type)], devices, buffer_size=buffer_size)
		# Cast the per-device copies back to their original dtypes, then
		# regroup so the result is one tuple of tensors per device.
		return list(zip(*[para if mtyp is None else [pu.to(styp) for pu in para] for para, mtyp, styp in zip(list(zip(*rs)), map_type, src_type)]))
```
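For orientation, a minimal usage sketch of the function added above. The device list, tensor shapes, and the `torch.bool` example are illustrative assumptions, not part of the commit; they presume `utils.base.nccl_type_map` maps NCCL-unsupported dtypes to supported surrogates and is `None` when no mapping is needed.

```python
import torch

# Hypothetical inputs: two tensors on GPU 0, to be replicated onto GPUs 0 and 1.
weights = torch.randn(4, 4, device="cuda:0")
mask = torch.ones(8, dtype=torch.bool, device="cuda:0")  # cast via nccl_type_map if unsupported

per_device = secure_broadcast_coalesced([weights, mask], devices=[0, 1])

# The layout mirrors torch.cuda.comm.broadcast_coalesced: per_device[d][i]
# is the copy of the i-th input tensor on devices[d], restored to its
# original dtype even when a surrogate dtype was used for the transfer.
assert per_device[1][1].dtype is torch.bool
```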