init_parallel_env

paddle.distributed.init_parallel_env() [source]

Initialize the parallel training environment in dynamic graph mode.

Note

Currently, this initializes both the NCCL and GLOO contexts for communication.
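In practice this means that once init_parallel_env() has returned inside a worker process, collectives such as paddle.distributed.all_reduce can be used on the initialized context. A minimal sketch (intended to run inside a function launched via dist.spawn or the distributed launcher, not as a standalone single process):

>>> import paddle
>>> import paddle.distributed as dist

>>> dist.init_parallel_env()
>>> data = paddle.to_tensor([1.0, 2.0, 3.0])
>>> dist.all_reduce(data)  # in-place sum of the tensor across all workers
>>> print(data)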

Parameters

backend (string) – The backend used by DataParallel; should be one of ‘gloo’ (for CPU), ‘nccl’ (for CUDA), ‘bkcl’ (for XPU), or ‘auto’ (auto detect). Auto detection prefers ‘nccl’ and ‘bkcl’ over ‘gloo’.
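With ‘auto’, the choice is keyed off the device in use. The sketch below assumes a CPU-only run, where auto detection falls back to ‘gloo’; paddle.set_device is used here only to make that assumption explicit.

>>> import paddle
>>> import paddle.distributed as dist

>>> paddle.set_device('cpu')  # illustrative; on GPU, 'nccl' would be preferred
>>> dist.init_parallel_env()  # backend resolved automatically from the device
>>> print(dist.get_rank(), dist.get_world_size())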

Returns

None

Examples

>>> import paddle
>>> import paddle.nn as nn
>>> import paddle.optimizer as opt
>>> import paddle.distributed as dist

>>> class LinearNet(nn.Layer):
...     def __init__(self):
...         super().__init__()
...         self._linear1 = nn.Linear(10, 10)
...         self._linear2 = nn.Linear(10, 1)
...     def forward(self, x):
...         return self._linear2(self._linear1(x))

>>> def train():
...     # 1. initialize parallel environment
...     dist.init_parallel_env()
...     # 2. create data parallel layer & optimizer
...     layer = LinearNet()
...     dp_layer = paddle.DataParallel(layer)
...     loss_fn = nn.MSELoss()
...     adam = opt.Adam(
...         learning_rate=0.001, parameters=dp_layer.parameters())
...     # 3. run layer
...     inputs = paddle.randn([10, 10], 'float32')
...     outputs = dp_layer(inputs)
...     labels = paddle.randn([10, 1], 'float32')
...     loss = loss_fn(outputs, labels)
...     loss.backward()
...     adam.step()
...     adam.clear_grad()

>>> if __name__ == '__main__':
...     dist.spawn(train)
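As a usage note, dist.spawn also accepts an nprocs argument to pin the number of worker processes; the value below is only illustrative:

>>> if __name__ == '__main__':
...     # launch two workers; each runs train(), and init_parallel_env()
...     # inside train() sets up that worker's communication context
...     dist.spawn(train, nprocs=2)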