6 examples of 'l2 regularization pytorch' in Python

Every line of the 'l2 regularization pytorch' code snippets below is scanned for vulnerabilities by our machine learning engine, which combs millions of open source libraries to help keep your Python code secure.
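
In PyTorch itself, the most common way to apply L2 regularization is through the optimizer's weight_decay argument, which adds the penalty's gradient (the coefficient times each weight) to every parameter update. A minimal sketch, with a toy model and random data as placeholders:

import torch
import torch.nn as nn

model = nn.Linear(10, 1)
criterion = nn.MSELoss()
# weight_decay is the L2 coefficient lambda
optimizer = torch.optim.SGD(model.parameters(), lr=0.1, weight_decay=1e-4)

x, y = torch.randn(32, 10), torch.randn(32, 1)
optimizer.zero_grad()
loss = criterion(model(x), y)
loss.backward()
optimizer.step()  # this step includes the L2 penalty via weight_decay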

The first snippet sums per-block penalties across a composite model, skipping blocks whose parameters are held constant:

def get_regularization(self, l):
    y = []
    # Collect the penalty from every trainable (non-constant) block
    for block, is_const in zip(self._blocks, self._const_params):
        if not is_const:
            y.append(block.get_regularization(l))
    return sum(y)
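
The composite above delegates to its children, so each leaf block needs a matching get_regularization method. A minimal sketch of what such a leaf might look like (the LinearBlock class and its attributes are assumptions, not part of the original source):

import torch

class LinearBlock:
    """Hypothetical leaf block exposing the same interface."""
    def __init__(self, in_dim, out_dim):
        self.weight = torch.randn(out_dim, in_dim, requires_grad=True)

    def get_regularization(self, l):
        # L2 penalty of this block's weights, scaled by the coefficient l
        return l * self.weight.pow(2).sum()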

The second snippet exposes a stored regularization term through an accessor. As written it was broken: the method returned self.regularization_loss, i.e. itself, rather than a value. The stored term presumably lives under another attribute name, assumed here to be self._regularization_loss:

def regularization_loss(self):
    # Return the penalty accumulated elsewhere; the attribute name is an assumption
    return self._regularization_loss

The third snippet computes the classic L2 cost in NumPy over two weight matrices, excluding the bias column (column 0) from the penalty:

import numpy as np

def _L2_reg(self, lambda_, w1, w2):
    """Compute the L2-regularization cost, skipping the bias column w[:, 0]."""
    return (lambda_ / 2.0) * (np.sum(w1[:, 1:] ** 2) + np.sum(w2[:, 1:] ** 2))
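
Standalone, the same computation looks like this; the shapes below are placeholders for a hypothetical two-layer network:

import numpy as np

lambda_ = 0.1
w1 = np.random.randn(30, 11)  # hidden layer: 30 units, 10 inputs + bias column
w2 = np.random.randn(3, 31)   # output layer: 3 units, 30 hidden units + bias column
cost = (lambda_ / 2.0) * (np.sum(w1[:, 1:] ** 2) + np.sum(w2[:, 1:] ** 2))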

The fourth snippet, from TFLearn, wraps tf.nn.l2_loss, which already computes half the sum of squares:

import tensorflow as tf

def L2(tensor, wd=0.001):
    """L2.

    Computes half the L2 norm of a tensor without the `sqrt`:

        output = (sum(t ** 2) / 2) * wd

    Arguments:
        tensor: `Tensor`. The tensor to apply regularization to.
        wd: `float`. The decay coefficient.

    Returns:
        The regularization `Tensor`.
    """
    return tf.multiply(tf.nn.l2_loss(tensor), wd, name='L2-Loss')
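
Since this page is about PyTorch, here is the same quantity written with torch ops only (the l2_penalty name is ours):

import torch

def l2_penalty(tensor, wd=0.001):
    # Half the sum of squares, scaled by the decay: matches tf.nn.l2_loss(t) * wd
    return tensor.pow(2).sum() / 2 * wd

penalty = l2_penalty(torch.randn(5, 5), wd=0.001)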

The fifth snippet exposes TensorFlow's collected regularization losses as a property. The original implicitly returned None when no regularizer was configured; returning a zero tensor instead keeps the value safe to add to a total loss:

@property
def l2_loss(self):
    """Compute the L2 loss if weight decay is desired."""
    if self.l2_regularizer is not None:
        return tf.losses.get_regularization_loss(scope=self.name, name=self.name + 'l2_loss')
    # No regularizer configured: contribute nothing to the total loss
    return tf.constant(0.0)

The last snippet implements L2,1 (group lasso) regularization for PyTorch as an in-place proximal update: each row of the weight matrix, optionally joined with its bias entry, is shrunk toward zero by a factor that depends on the row's L2 norm:

import torch

def l21(parameter, bias=None, reg=0.01, lr=0.1):
    """L21 regularization: proximal shrinkage of whole weight rows."""
    if bias is not None:
        # Treat each bias entry as an extra column of its weight row
        w_and_b = torch.cat((parameter, bias.unfold(0, 1, 1)), 1)
    else:
        w_and_b = parameter
    L21 = reg  # lambda: regularization strength
    norm = lr * L21 / w_and_b.norm(2, dim=1)
    ones = torch.ones(w_and_b.size(0), device=w_and_b.device)
    # Per-row shrinkage factor; rows with small norm are zeroed out entirely
    l21T = 1.0 - torch.min(ones, norm)
    parameter.data = parameter * l21T.unsqueeze(1)
    # Update the bias with the same per-row factor
    if bias is not None:
        bias.data = bias * l21T
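
Because l21 rewrites parameter.data in place, it acts as a proximal operator and would typically be called right after the optimizer step. A minimal sketch of that usage (the layer, data, and hyperparameters are placeholders):

import torch
import torch.nn as nn
import torch.nn.functional as F

layer = nn.Linear(10, 5)
optimizer = torch.optim.SGD(layer.parameters(), lr=0.1)

x, y = torch.randn(32, 10), torch.randn(32, 5)
optimizer.zero_grad()
loss = F.mse_loss(layer(x), y)
loss.backward()
optimizer.step()

# Proximal step: shrink whole rows of the weight matrix after the gradient step
with torch.no_grad():
    l21(layer.weight, bias=layer.bias, reg=0.01, lr=0.1)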
