Reference: torch.distributed.init_process_group()
Contents (see the usage sketch after this list)
torch.distributed.is_available()[source]
class torch.distributed.Backend
torch.distributed.get_backend(group=<object object>)[source]
torch.distributed.get_rank(group=<object object>)[source]
torch.distributed.get_world_size(group=<object object>)[source]
torch.distributed.is_initialized()[source]
torch.distributed.is_mpi_available()[source]
torch.distributed.is_nccl_available()[source]
torch.distributed.new_group(ranks=None, timeout=datetime.timedelta(0, 1800), backend=None)[source]
torch.distributed.send(tensor, dst, group=<object object>, tag=0)[source]
torch.distributed.recv(tensor, src=None, group=<object object>, tag=0)[source]
torch.distributed.isend(tensor, dst, group=<object object>, tag=0)[source]
torch.distributed.irecv(tensor, src, group=<object object>, tag=0)[source]
torch.distributed.broadcast(tensor, src, group=<object object>, async_op=False)[source]
torch.distributed.all_reduce(tensor, op=ReduceOp.SUM, group=<object object>, async_op=False)[source]
torch.distributed.reduce(tensor, dst, op=ReduceOp.SUM, group=<object object>, async_op=False)[source]
torch.distributed.all_gather(tensor_list, tensor, group=<object object>, async_op=False)[source]
torch.distributed.gather(tensor, gather_list=None, dst=0, group=<object object>, async_op=False)[source]
torch.distributed.scatter(tensor, scatter_list=None, src=0, group=<object object>, async_op=False)[source]
torch.distributed.all_to_all(output_tensor_list, input_tensor_list, group=<object object>, async_op=False)[source]
torch.distributed.barrier(group=<object object>, async_op=False)[source]
class torch.distributed.ReduceOp
class torch.distributed.reduce_op[source]
torch.distributed.broadcast_multigpu(tensor_list, src, group=<object object>, async_op=False, src_tensor=0)[source]
torch.distributed.all_reduce_multigpu(tensor_list, op=ReduceOp.SUM, group=<object object>, async_op=False)[source]
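To show how the APIs in the list fit together, here is a minimal sketch of a typical workflow: initialize the process group with torch.distributed.init_process_group(), then call a few collectives (broadcast, all_reduce, all_gather, barrier). It assumes a single node launched via torch.multiprocessing.spawn with the "gloo" backend on CPU tensors; the rendezvous address, port, and world size are illustrative choices, not part of the original page.

```python
import os
import torch
import torch.distributed as dist
import torch.multiprocessing as mp


def worker(rank, world_size):
    os.environ["MASTER_ADDR"] = "127.0.0.1"   # assumed rendezvous address
    os.environ["MASTER_PORT"] = "29500"       # assumed free port
    dist.init_process_group("gloo", rank=rank, world_size=world_size)

    # broadcast: rank 0's tensor overwrites the tensor on every other rank
    t = torch.full((2,), float(rank))
    dist.broadcast(t, src=0)                  # t == [0., 0.] on all ranks

    # all_reduce: every rank ends up with the elementwise sum
    x = torch.ones(2) * (rank + 1)
    dist.all_reduce(x, op=dist.ReduceOp.SUM)  # each element == 1 + 2 + ... + world_size

    # all_gather: collect one tensor from each rank into a list
    out = [torch.zeros(2) for _ in range(world_size)]
    dist.all_gather(out, x)

    dist.barrier()                            # wait for all ranks before printing
    if dist.get_rank() == 0:
        print("world_size =", dist.get_world_size(), "all_reduce result =", x)
    dist.destroy_process_group()


if __name__ == "__main__":
    world_size = 2
    mp.spawn(worker, args=(world_size,), nprocs=world_size, join=True)
```

The same pattern applies to the point-to-point calls (send/recv and their non-blocking isend/irecv variants): after init_process_group(), one rank calls send(tensor, dst=...) while the destination rank calls recv(tensor, src=...) on a tensor of matching shape.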