## [ 仅 API 调用方式不一致 ] torch.distributed.ReduceOp.PRODUCT

PyTorch 的 `torch.distributed.ReduceOp.PRODUCT` 对应 Paddle 的 `paddle.distributed.ReduceOp.PROD`，两者功能一致，仅 API 名称不同。
### 转写示例
# PyTorch 写法
torch.distributed.reduce_scatter(
data1,
[data1, data2],
op=torch.distributed.ReduceOp.PRODUCT,
group=None,
async_op=False
)
# Paddle 写法
paddle.distributed.reduce_scatter(
tensor=data1,
tensor_list=[data1, data2],
op=paddle.distributed.ReduceOp.PROD,
group=None,
    sync_op=True,  # Paddle 的 sync_op 与 PyTorch 的 async_op 含义相反：sync_op = not async_op
)