10 examples of 'sparse_categorical_crossentropy' in Python

Every line of 'sparse_categorical_crossentropy' code snippets is scanned for vulnerabilities by our powerful machine learning engine that combs millions of open source libraries, ensuring your Python code is secure.

from tensorflow.keras import backend as K  # one possible import for K

def sparse_loss(y_true, y_pred):
    # y_pred holds raw logits, hence from_logits=True
    return K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
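
A minimal usage sketch (the architecture and shapes below are made up): because the loss is computed from logits, the final layer should not apply a softmax.

from tensorflow import keras

# Hypothetical model: the last Dense layer outputs raw logits (no softmax),
# matching from_logits=True in sparse_loss above.
model = keras.Sequential([
    keras.Input(shape=(20,)),
    keras.layers.Dense(64, activation='relu'),
    keras.layers.Dense(10),
])
model.compile(optimizer='adam', loss=sparse_loss, metrics=['accuracy'])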

from tensorflow.keras import backend as K  # one possible import for K
_EPSILON = K.epsilon()

def sparse_categorical_crossentropy(p_y_pred, y_gt):
    p_y_pred = K.clip(p_y_pred, _EPSILON, 1. - _EPSILON)  # avoid log(0)
    # one-hot encode the integer labels to the prediction width
    # (the Keras 2 backend expects the target first)
    y_gt = K.one_hot(K.cast(y_gt, 'int32'), K.int_shape(p_y_pred)[-1])
    return K.mean(K.categorical_crossentropy(y_gt, p_y_pred))
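
A quick numeric sanity check with toy values: two samples whose true classes are predicted with probabilities 0.7 and 0.8 should give a mean loss of roughly 0.29.

from tensorflow.keras import backend as K

p = K.constant([[0.7, 0.2, 0.1],
                [0.1, 0.1, 0.8]])   # predicted probabilities
y = K.constant([0, 2])              # integer class labels
print(K.eval(sparse_categorical_crossentropy(p, y)))  # ≈ 0.29
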
import tensorflow as tf  # TF 2.x; in TF 1.x this was tf.log instead of tf.math.log

def categorical_crossentropy(y, t):
    # y: predicted probabilities, t: one-hot targets.
    # Clip to avoid log(0), sum over the class axes, average over the batch.
    loss = \
        tf.reduce_mean(-tf.reduce_sum(
            t * tf.math.log(tf.clip_by_value(y, 1e-10, 1.0)),
            axis=list(range(1, len(y.get_shape())))))

    return loss
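
A quick eager-mode check with made-up probabilities and one-hot targets:

import tensorflow as tf

y = tf.constant([[0.9, 0.05, 0.05],
                 [0.1, 0.8, 0.1]])   # predicted probabilities
t = tf.constant([[1., 0., 0.],
                 [0., 1., 0.]])      # one-hot targets
print(categorical_crossentropy(y, t).numpy())  # ≈ (-log 0.9 - log 0.8) / 2 ≈ 0.164
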
import numpy as np

def batch_crossentropy(label, logits):
    """Calculates the cross-entropy for a batch of logits.

    Parameters
    ----------
    logits : array_like
        The logits predicted by the model for a batch of inputs.
    label : int
        The label describing the target distribution.

    Returns
    -------
    np.ndarray
        The cross-entropy between softmax(logits[i]) and onehot(label)
        for all i.

    """

    assert logits.ndim == 2

    # for numerical reasons we subtract the max logit
    # (mathematically it doesn't matter!)
    # otherwise exp(logits) might become too large or too small
    logits = logits - np.max(logits, axis=1, keepdims=True)
    e = np.exp(logits)
    s = np.sum(e, axis=1)
    ces = np.log(s) - logits[:, label]
    return ces
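
A small NumPy sanity check (the values are invented): each entry should equal -log softmax(logits)[i, label].

import numpy as np

logits = np.array([[2.0, 1.0, 0.1],
                   [0.5, 2.5, 0.3]])
label = 1
probs = np.exp(logits) / np.exp(logits).sum(axis=1, keepdims=True)
print(batch_crossentropy(label, logits))   # ≈ [1.417, 0.220]
print(-np.log(probs[:, label]))            # same values
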
import numpy as np

def categorical_crossentropy(preds, labels):
    # np.extract keeps the predicted probabilities where the one-hot labels are nonzero.
    return np.mean(-np.log(np.extract(labels, preds)))
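
The result is the mean negative log-likelihood of the true classes. A toy check with invented numbers:

import numpy as np

preds = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.1, 0.8]])
labels = np.array([[1, 0, 0],
                   [0, 0, 1]])
print(categorical_crossentropy(preds, labels))  # ≈ 0.29, i.e. mean(-log 0.7, -log 0.8)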
def categorical_crossentropy(target, output, from_logits=False):
  """Categorical crossentropy between an output tensor and a target tensor.

  Arguments:
      target: A tensor of the same shape as `output`.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.

  Returns:
      Output tensor.
  """
  # Note: nn.softmax_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    # scale preds so that the class probas of each sample sum to 1
    output /= math_ops.reduce_sum(
        output, len(output.get_shape()) - 1, True)
    # manual computation of crossentropy
    epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
    return -math_ops.reduce_sum(
        target * math_ops.log(output),
        axis=len(output.get_shape()) - 1)
  else:
    return nn.softmax_cross_entropy_with_logits(labels=target, logits=output)
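
The helpers used above (math_ops, clip_ops, nn, _to_tensor, epsilon) appear to live inside TensorFlow's private Keras backend module, so this snippet is best read as reference code. In user code the same computation is reachable through the public API; a quick sketch:

import tensorflow as tf

target = tf.constant([[0., 1., 0.]])
logits = tf.constant([[0.5, 2.0, -1.0]])
# from_logits=True takes the softmax_cross_entropy_with_logits branch internally
print(tf.keras.backend.categorical_crossentropy(target, logits, from_logits=True).numpy())
print(tf.keras.losses.categorical_crossentropy(target, logits, from_logits=True).numpy())
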
import numpy as np

def crossentropy(label, logits):
    """Calculates the cross-entropy.

    Parameters
    ----------
    logits : array_like
        The logits predicted by the model.
    label : int
        The label describing the target distribution.

    Returns
    -------
    float
        The cross-entropy between softmax(logits) and onehot(label).

    """

    assert logits.ndim == 1

    # for numerical reasons we subtract the max logit
    # (mathematically it doesn't matter!)
    # otherwise exp(logits) might become too large or too small
    logits = logits - np.max(logits)
    e = np.exp(logits)
    s = np.sum(e)
    ce = np.log(s) - logits[label]
    return ce
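
Since this is the single-example counterpart of batch_crossentropy above, the two should agree row by row; a quick check with invented logits:

import numpy as np

logits = np.array([[2.0, 1.0, 0.1],
                   [0.5, 2.5, 0.3]])
label = 0
print(batch_crossentropy(label, logits))
print(np.array([crossentropy(label, row) for row in logits]))  # same values
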
import tensorflow as tf
from tensorflow.keras import backend as K  # one possible set of imports

def softmax_sparse_crossentropy_ignoring_last_label(y_true, y_pred):
    # Flatten predictions to (num_pixels, num_classes) and take the log-softmax.
    y_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
    log_softmax = tf.nn.log_softmax(y_pred)

    # One-hot encode with one extra class, then drop that last channel, so
    # labels equal to num_classes get an all-zero target and are ignored.
    y_true = K.one_hot(tf.cast(K.flatten(y_true), tf.int32),
                       K.int_shape(y_pred)[-1] + 1)
    unpacked = tf.unstack(y_true, axis=-1)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    cross_entropy = -K.sum(y_true * log_softmax, axis=1)
    cross_entropy_mean = K.mean(cross_entropy)

    return cross_entropy_mean
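
This pattern is common in semantic segmentation, where pixels labeled with an extra "ignore" index (equal to the number of real classes) contribute nothing to the loss. A hypothetical compile call, with the architecture and class count invented for illustration:

from tensorflow import keras

num_classes = 21  # invented; labels with the value 21 are ignored by the loss
inputs = keras.Input(shape=(128, 128, 3))
logits = keras.layers.Conv2D(num_classes, 1)(inputs)   # per-pixel logits, no softmax
model = keras.Model(inputs, logits)
model.compile(optimizer='adam',
              loss=softmax_sparse_crossentropy_ignoring_last_label)
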
import theano.tensor as T  # Theano

def cross_entropy(self, y):
    # Per-example cross-entropy between the model's softmax output
    # p_y_given_x and the class labels y.
    return T.nnet.categorical_crossentropy(self.p_y_given_x, y)

import theano
import theano.tensor as T

# (method of a larger class; imports shown for completeness)
@staticmethod
def one_hot_crossentropy(y_true, y_pred):

    # Taken directly from how Keras guards against nan/inf.
    if theano.config.floatX == 'float64':
        epsilon = 1.0e-9
    else:
        epsilon = 1.0e-7

    # Clip values into the (0, 1) range.
    # (Strictly speaking unnecessary, since softmax already outputs values
    # in 0..1, but kept as a safeguard against nan/inf.)
    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
    # scale preds so that the class probas of each sample sum to 1
    y_pred /= y_pred.sum(axis=-1, keepdims=True)

    # vocabulary size
    voca_size = T.shape(y_pred)[-1]

    # Flatten to 1D arrays for indexing.
    y_pred = y_pred.flatten()
    y_true = y_true.flatten().astype('int32')

    # Map each true word index into the flattened 1D array.
    ix = T.arange(y_true.size) * voca_size + y_true

    # indexing instead of summation
    cce = -T.log(y_pred[ix])

    return cce
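
A sketch of exercising this directly (the enclosing class name Losses is an assumption, and Theano itself is legacy software):

import numpy as np
import theano
import theano.tensor as T

y_true = T.ivector('y_true')   # integer word indices
y_pred = T.matrix('y_pred')    # (n_samples, vocabulary_size) probabilities
f = theano.function([y_true, y_pred], Losses.one_hot_crossentropy(y_true, y_pred))

probs = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.1, 0.8]], dtype=theano.config.floatX)
print(f(np.array([0, 2], dtype='int32'), probs))  # ≈ [-log 0.7, -log 0.8]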
