# 10 examples of 'sparse_categorical_crossentropy' in Python

Every line of the 'sparse_categorical_crossentropy' code snippets below is scanned for vulnerabilities by our machine learning engine, which combs millions of open source libraries to help keep your Python code secure.

## All examples are scanned by Snyk Code

```python
from tensorflow.keras import backend as K

def sparse_loss(y_true, y_pred):
    # Integer class labels vs. raw logits; Keras applies the softmax internally.
    return K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)
```
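Since this loss is computed `from_logits`, the model's final layer should emit raw scores with no softmax. A minimal usage sketch under TF 2.x (the model architecture and shapes here are illustrative assumptions, not part of the original snippet):

```python
import tensorflow as tf
from tensorflow.keras import backend as K

def sparse_loss(y_true, y_pred):
    return K.sparse_categorical_crossentropy(y_true, y_pred, from_logits=True)

# Hypothetical 10-class classifier: the final Dense layer has no softmax,
# because the loss above consumes raw logits.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=(20,)),
    tf.keras.layers.Dense(10),  # logits
])
model.compile(optimizer='adam', loss=sparse_loss)
```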
```python
def sparse_categorical_crossentropy(p_y_pred, y_gt):
    # `K` is the originating project's backend module and `_EPSILON` its fuzz
    # factor (typically 1e-7). `to_one_hot` is not a standard Keras backend
    # function, and its class-count argument was left blank in the original.
    p_y_pred = K.clip(p_y_pred, _EPSILON, 1. - _EPSILON)  # avoid log(0)
    y_gt = K.to_one_hot(y_gt, )  # integer labels -> one-hot vectors
    return K.mean(K.categorical_crossentropy(p_y_pred, y_gt))
```
```python
import tensorflow as tf  # TF 1.x API: `tf.log` became `tf.math.log` in TF 2.x

def categorical_crossentropy(y, t):
    # Mean over the batch of -sum(t * log(y)); clipping avoids log(0).
    loss = tf.reduce_mean(
        -tf.reduce_sum(t * tf.log(tf.clip_by_value(y, 1e-10, 1.0)),
                       axis=list(range(1, len(y.get_shape())))))
    return loss
```
```python
import numpy as np

def batch_crossentropy(label, logits):
    """Calculates the cross-entropy for a batch of logits.

    Parameters
    ----------
    logits : array_like
        The logits predicted by the model for a batch of inputs.
    label : int
        The label describing the target distribution.

    Returns
    -------
    np.ndarray
        The cross-entropy between softmax(logits[i]) and onehot(label)
        for all i.

    """

    assert logits.ndim == 2

    # for numerical reasons we subtract the max logit
    # (mathematically it doesn't matter!)
    # otherwise exp(logits) might become too large or too small
    logits = logits - np.max(logits, axis=1, keepdims=True)
    e = np.exp(logits)
    s = np.sum(e, axis=1)
    ces = np.log(s) - logits[:, label]
    return ces
```
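As a sanity check, the result can be compared with computing `-log(softmax(logits)[:, label])` directly. An illustrative example, assuming the `batch_crossentropy` definition above (the numbers are made up):

```python
import numpy as np

logits = np.array([[2.0, 1.0, 0.1],
                   [0.5, 2.5, 0.0]])
label = 1

ces = batch_crossentropy(label, logits)

# Direct computation via an explicit softmax.
probs = np.exp(logits - logits.max(axis=1, keepdims=True))
probs /= probs.sum(axis=1, keepdims=True)
np.testing.assert_allclose(ces, -np.log(probs[:, label]))
```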
```python
import numpy as np

def categorical_crossentropy(preds, labels):
    # `labels` is a one-hot mask; np.extract picks the predicted
    # probability of the true class for each sample.
    return np.mean(-np.log(np.extract(labels, preds)))
```
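One caveat with this formulation: `np.extract` flattens in row-major order, so `labels` must be a one-hot (or boolean) mask with exactly the same shape as `preds`. A quick illustration, assuming the function above (example values are made up):

```python
import numpy as np

preds = np.array([[0.7, 0.2, 0.1],
                  [0.1, 0.8, 0.1]])
labels = np.array([[1, 0, 0],
                   [0, 1, 0]])  # one-hot targets

# np.extract picks out 0.7 and 0.8, so this equals np.mean(-np.log([0.7, 0.8])).
loss = categorical_crossentropy(preds, labels)
```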
```python
# From TensorFlow's bundled Keras backend; `math_ops`, `clip_ops`, `nn`,
# `_to_tensor` and `epsilon` are internal TensorFlow/Keras helpers.
def categorical_crossentropy(target, output, from_logits=False):
  """Categorical crossentropy between an output tensor and a target tensor.

  Arguments:
      target: A tensor of the same shape as `output`.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.

  Returns:
      Output tensor.
  """
  # Note: nn.softmax_cross_entropy_with_logits
  # expects logits, Keras expects probabilities.
  if not from_logits:
    # scale preds so that the class probas of each sample sum to 1
    output /= math_ops.reduce_sum(
        output, len(output.get_shape()) - 1, True)
    # manual computation of crossentropy
    epsilon_ = _to_tensor(epsilon(), output.dtype.base_dtype)
    output = clip_ops.clip_by_value(output, epsilon_, 1. - epsilon_)
    return -math_ops.reduce_sum(
        target * math_ops.log(output),
        axis=len(output.get_shape()) - 1)
  else:
    return nn.softmax_cross_entropy_with_logits(labels=target, logits=output)
```
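The sparse variant differs only in label format: integer class indices instead of one-hot rows. Under TF 2.x, the equivalence can be checked with the public `tf.keras.backend` API (a sketch, not the internal ops used above):

```python
import tensorflow as tf

logits = tf.constant([[2.0, 1.0, 0.1]])
sparse_labels = tf.constant([0])

# One-hot targets with the dense categorical cross-entropy ...
dense = tf.keras.backend.categorical_crossentropy(
    tf.one_hot(sparse_labels, depth=3), logits, from_logits=True)

# ... match integer targets with the sparse variant.
sparse = tf.keras.backend.sparse_categorical_crossentropy(
    sparse_labels, logits, from_logits=True)

tf.debugging.assert_near(dense, sparse)
```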
```python
import numpy as np

def crossentropy(label, logits):
    """Calculates the cross-entropy.

    Parameters
    ----------
    logits : array_like
        The logits predicted by the model.
    label : int
        The label describing the target distribution.

    Returns
    -------
    float
        The cross-entropy between softmax(logits) and onehot(label).

    """

    assert logits.ndim == 1

    # for numerical reasons we subtract the max logit
    # (mathematically it doesn't matter!)
    # otherwise exp(logits) might become too large or too small
    logits = logits - np.max(logits)
    e = np.exp(logits)
    s = np.sum(e)
    ce = np.log(s) - logits[label]
    return ce
```
```python
import tensorflow as tf  # TF 1.x: `tf.to_int32` became `tf.cast(..., tf.int32)`
from keras import backend as K

def softmax_sparse_crossentropy_ignoring_last_label(y_true, y_pred):
    # Flatten predictions to (num_pixels, num_classes) and take the log-softmax.
    y_pred = K.reshape(y_pred, (-1, K.int_shape(y_pred)[-1]))
    log_softmax = tf.nn.log_softmax(y_pred)

    # One-hot encode with one extra class, then drop that last channel,
    # so pixels carrying the "ignore" label contribute nothing to the loss.
    y_true = K.one_hot(tf.to_int32(K.flatten(y_true)), K.int_shape(y_pred)[-1]+1)
    unpacked = tf.unstack(y_true, axis=-1)
    y_true = tf.stack(unpacked[:-1], axis=-1)

    cross_entropy = -K.sum(y_true * log_softmax, axis=1)
    cross_entropy_mean = K.mean(cross_entropy)

    return cross_entropy_mean
```
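This ignore-the-last-label pattern is common in semantic segmentation, where void pixels get an extra class index one past the last real class. A hypothetical compile call, assuming a TF 1.x / standalone-Keras environment where the snippet runs as written (the model below is purely illustrative):

```python
from keras.models import Sequential
from keras.layers import Conv2D

num_classes = 21  # real classes 0..20; pixels labelled 21 are ignored

# Toy fully-convolutional model emitting one score map per real class.
model = Sequential([
    Conv2D(16, 3, padding='same', activation='relu', input_shape=(64, 64, 3)),
    Conv2D(num_classes, 1),
])
model.compile(optimizer='adam',
              loss=softmax_sparse_crossentropy_ignoring_last_label)
```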
```python
import theano.tensor as T

def cross_entropy(self, y):
    # Theano's categorical_crossentropy accepts integer class indices for `y`,
    # making this the sparse form; `self.p_y_given_x` is the model's softmax output.
    return T.nnet.categorical_crossentropy(self.p_y_given_x, y)
```
```python
import theano
import theano.tensor as T

@staticmethod
def one_hot_crossentropy(y_true, y_pred):

    # Taken directly from Keras' handling to guard against nan/inf.
    if theano.config.floatX == 'float64':
        epsilon = 1.0e-9
    else:
        epsilon = 1.0e-7

    # Clip values into the (0, 1) range.
    # (Strictly speaking unnecessary, since softmax already outputs values
    # in (0, 1), but it guards against the odd nan/inf.)
    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
    # scale preds so that the class probas of each sample sum to 1
    y_pred /= y_pred.sum(axis=-1, keepdims=True)

    # vocabulary size
    voca_size = T.shape(y_pred)[-1]

    # Flatten to a 1D array for indexing.
    y_pred = y_pred.flatten()
    y_true = y_true.flatten().astype('int32')

    # Convert y_true's word indices into offsets into the flattened array.
    ix = T.arange(y_true.size) * voca_size + y_true

    # indexing instead of summation
    cce = -T.log(y_pred[ix])

    return cce
```