GradientClipByNorm

class paddle.fluid.clip.GradientClipByNorm(clip_norm)[source]

Converts the input multidimensional Tensor \(X\) into a multidimensional Tensor whose L2 norm does not exceed the given maximum norm value ( \(clip\_norm\) ).

The Tensor is not passed to this class directly; clipping is applied to the program specified through the main_program parameter of fluid.program_guard.

This class limits the L2 norm of the input \(X\) within \(clip\_norm\).

\[Out = \begin{cases} X, & \text{if } norm(X) \leq clip\_norm \\ \dfrac{clip\_norm \cdot X}{norm(X)}, & \text{if } norm(X) > clip\_norm \end{cases}\]

where \(norm(X)\) represents the L2 norm of \(X\).

\[norm(X) = \left( \sum_{i=1}^{n} |x_i|^2 \right)^{\frac{1}{2}}\]
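
For intuition, the clipping rule above can be reproduced with a few lines of NumPy; the gradient values used here are made up purely for illustration:

import numpy as np

clip_norm = 2.0
x = np.array([3.0, 4.0], dtype=np.float32)   # illustrative gradient, norm(x) = 5.0
l2_norm = np.sqrt(np.sum(np.square(x)))
if l2_norm > clip_norm:
    out = clip_norm * x / l2_norm            # rescale so that norm(out) == clip_norm
else:
    out = x
print(out)                                   # [1.2 1.6], whose L2 norm is 2.0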
Parameters

clip_norm (float) – The maximum norm value

Examples

import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core

place = core.CPUPlace()
prog = fluid.framework.Program()
startup_program = fluid.framework.Program()
# Build a small MNIST classifier in `prog`.
with fluid.program_guard(main_program=prog, startup_program=startup_program):
    image = fluid.data(name='x', shape=[None, 784], dtype='float32', lod_level=0)
    label = fluid.data(name='y', shape=[None, 1], dtype='int64', lod_level=0)
    hidden1 = fluid.layers.fc(input=image, size=128, act='relu')
    hidden2 = fluid.layers.fc(input=hidden1, size=64, act='relu')
    predict = fluid.layers.fc(input=hidden2, size=10, act='softmax')
    cost = fluid.layers.cross_entropy(input=predict, label=label)
    avg_cost = fluid.layers.mean(cost)

# Clone the program so that one copy keeps the raw gradients
# while the other applies gradient clipping.
prog_clip = prog.clone()
avg_cost_clip = prog_clip.block(0).var(avg_cost.name)
p_g = fluid.backward.append_backward(loss=avg_cost)
p_g_clip = fluid.backward.append_backward(loss=avg_cost_clip)

# Register GradientClipByNorm on the cloned program and append the clip ops.
with fluid.program_guard(main_program=prog_clip, startup_program=startup_program):
    fluid.clip.set_gradient_clip(
        fluid.clip.GradientClipByNorm(clip_norm=2.0))
    p_g_clip = fluid.clip.append_gradient_clip_ops(p_g_clip)

grad_list = [elem[1] for elem in p_g]            # unclipped gradient variables
grad_clip_list = [elem[1] for elem in p_g_clip]  # clipped gradient variables

train_reader = paddle.batch(
    paddle.reader.shuffle(
        paddle.dataset.mnist.train(), buf_size=8192),
    batch_size=128)

exe = fluid.Executor(place)
feeder = fluid.DataFeeder(feed_list=[image, label], place=place)
exe.run(startup_program)

# Run a few batches and fetch both the raw and the clipped gradients.
count = 0
for data in train_reader():
    count += 1
    print("count:%s" % count)
    if count > 5:
        break
    out = exe.run(prog, feed=feeder.feed(data), fetch_list=grad_list)
    out_clip = exe.run(prog_clip,
                       feed=feeder.feed(data),
                       fetch_list=grad_clip_list)