10 examples of 'binary cross entropy keras' in Python

Every line of these 'binary cross entropy keras' code snippets is scanned for vulnerabilities by our machine learning engine, which combs millions of open source libraries to help keep your Python code secure.
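
The snippets below are hand-rolled implementations collected from open source projects. For orientation, here is a minimal sketch of the built-in Keras API they typically replace, assuming TensorFlow 2.x and tf.keras:

import tensorflow as tf

y_true = tf.constant([[0.0], [1.0], [1.0]])
y_pred = tf.constant([[0.1], [0.8], [0.6]])

# Standalone loss object (from_logits=False because y_pred are probabilities).
bce = tf.keras.losses.BinaryCrossentropy(from_logits=False)
print(float(bce(y_true, y_pred)))

# Or simply by name when compiling a model with a sigmoid output.
model = tf.keras.Sequential([tf.keras.layers.Dense(1, activation="sigmoid")])
model.compile(optimizer="adam", loss="binary_crossentropy")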

def cross_entropy(self, y):
    return T.nnet.categorical_crossentropy(self.p_y_given_x, y)
def weighted_binary_cross_entropy(targets, predictions, class_weights):

    predictions = tf.clip_by_value(predictions, 1e-7, 1 - 1e-7)
    # tf.log is the TensorFlow 1.x spelling (tf.math.log in 2.x); get_reduce_axis
    # is a project helper defined elsewhere. Note the result is not negated, i.e.
    # it is the negative of the usual weighted binary cross-entropy.
    return tf.reduce_mean(
        class_weights * (targets * tf.log(predictions)
                         + (1 - targets) * tf.log(1 - predictions)),
        axis=get_reduce_axis(targets))
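TensorFlow also ships a numerically stable built-in for the weighted binary case that works on raw logits; a minimal sketch, assuming TF 2.x (the pos_weight value here is only illustrative):

import tensorflow as tf

labels = tf.constant([[1.0], [0.0], [1.0]])
logits = tf.constant([[2.0], [-1.0], [0.5]])  # raw scores, before the sigmoid

# pos_weight > 1 up-weights errors on the positive class; because this
# operates on logits, no manual clipping is required.
per_example = tf.nn.weighted_cross_entropy_with_logits(
    labels=labels, logits=logits, pos_weight=3.0)
print(tf.reduce_mean(per_example).numpy())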
def _cross_entropy(self, x, labels):
    # x holds class probabilities; clipping avoids log(0)
    x = tf.reshape(x, [-1, self.num_classes])
    cross_entropy = -tf.reduce_sum(
        (labels * tf.math.log(tf.clip_by_value(x, 1e-10, 1.0))),
        axis=[1]
    )
    cross_entropy_mean = tf.reduce_mean(cross_entropy, name="cross_entropy_mean")

    return cross_entropy_mean
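The same quantity for probability inputs is available from the Keras losses module; a minimal sketch, assuming TF 2.x and one-hot labels:

import tensorflow as tf

labels = tf.constant([[0.0, 1.0, 0.0], [1.0, 0.0, 0.0]])
probs = tf.constant([[0.1, 0.8, 0.1], [0.7, 0.2, 0.1]])

# from_logits=False because `probs` are already softmax outputs.
per_example = tf.keras.losses.categorical_crossentropy(labels, probs, from_logits=False)
print(tf.reduce_mean(per_example).numpy())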
def cross_entropy(self, y):
    if self.mini_batch:
        return T.mean(T.sum(T.nnet.categorical_crossentropy(self.y_t, y), axis=1))  # naive normalization over the mini-batch
    else:
        return T.sum(T.nnet.categorical_crossentropy(self.y_t, y))
def binary_crossentropy(predictions, targets):
    """Computes the binary cross-entropy between predictions and targets.

    .. math:: L = -t \\log(p) - (1 - t) \\log(1 - p)

    Parameters
    ----------
    predictions : Theano tensor
        Predictions in (0, 1), such as sigmoidal output of a neural network.
    targets : Theano tensor
        Targets in [0, 1], such as ground truth labels.

    Returns
    -------
    Theano tensor
        An expression for the element-wise binary cross-entropy.

    Notes
    -----
    This is the loss function of choice for binary classification problems
    and sigmoid output units.
    """
    predictions, targets = align_targets(predictions, targets)
    return theano.tensor.nnet.binary_crossentropy(predictions, targets)
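A hedged usage sketch of the underlying Theano op this helper wraps (align_targets is not shown above, and Theano itself is no longer maintained):

import theano
import theano.tensor as T

preds = T.matrix("preds")      # sigmoid outputs in (0, 1)
targets = T.matrix("targets")  # ground-truth labels in [0, 1]

loss = T.nnet.binary_crossentropy(preds, targets).mean()
f = theano.function([preds, targets], loss)

print(f([[0.9, 0.2]], [[1.0, 0.0]]))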
def cross_entropy(self, y_true, y_pred):
    # clamp predictions away from 0 and 1 so K.log never sees log(0)
    y_pred = K.maximum(K.minimum(y_pred, 1 - 1e-15), 1e-15)
    cross_entropy_loss = -K.sum(y_true * K.log(y_pred), axis=-1)
    return cross_entropy_loss
def WeightedBinaryCrossEntropy(x_true, eps):
    # K is the Keras backend, e.g. `from tensorflow.keras import backend as K`
    def WeightedBinaryCrossEntropy_(y_true, y_pred):
        # element-wise binary cross-entropy
        err = -((y_true * K.log(y_pred)) + ((1 - y_true) * K.log(1 - y_pred)))

        # per-sample fraction of positives in x_true determines the class weights
        probs = K.mean(x_true, axis=(1, 2, 3), keepdims=True)
        weights_pos, weights_neg = 1. / (probs + eps), 1. / ((1 - probs) + eps)
        weights = (x_true * weights_pos) + ((1 - x_true) * weights_neg)

        return K.mean(err * weights)

    return WeightedBinaryCrossEntropy_
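A hedged usage sketch that evaluates the returned closure directly, reusing the ground-truth batch as x_true so the class weights come from the labels themselves (the original project may bind x_true differently):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K  # needed by the definition above

y_true = tf.constant(np.random.randint(0, 2, size=(4, 32, 32, 1)).astype("float32"))
y_pred = tf.clip_by_value(tf.random.uniform((4, 32, 32, 1)), 1e-7, 1 - 1e-7)

loss_fn = WeightedBinaryCrossEntropy(y_true, eps=1e-6)
print(float(loss_fn(y_true, y_pred)))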
def cross_entropy(logit, prob):
    # K.tf is the TensorFlow module exposed by older multi-backend Keras; note
    # there is no leading minus sign, so this is the negative of the usual cross-entropy
    return K.sum(prob * K.tf.nn.log_softmax(logit), axis=1)
def cross_entropy_loss(y, yhat):
    """
    Compute the cross entropy loss in tensorflow.

    y is a one-hot tensor of shape (n_samples, n_classes) and yhat is a tensor
    of shape (n_samples, n_classes). y should be of dtype tf.int32, and yhat should
    be of dtype tf.float32.

    The functions tf.to_float, tf.reduce_sum, and tf.log might prove useful. (Many
    solutions are possible, so you may not need to use all of these functions).

    Note: You are NOT allowed to use the tensorflow built-in cross-entropy
    functions.

    Args:
        y: tf.Tensor with shape (n_samples, n_classes). One-hot encoded.
        yhat: tf.Tensor with shape (n_samples, n_classes). Each row encodes a
              probability distribution and should sum to 1.
    Returns:
        out: tf.Tensor with shape (1,) (Scalar output). You need to construct this
             tensor in the problem.
    """
    ### YOUR CODE HERE
    # tf.to_float and tf.log are TensorFlow 1.x APIs (tf.cast / tf.math.log in 2.x)
    out = tf.reduce_sum(-tf.to_float(y) * tf.log(yhat))
    ### END YOUR CODE
    return out
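For context, the same hand-rolled formula written against current TensorFlow APIs (tf.to_float and tf.log were removed in TF 2.x); a hedged sketch:

import tensorflow as tf

y = tf.constant([[0, 1, 0], [1, 0, 0]], dtype=tf.int32)    # one-hot labels
yhat = tf.constant([[0.1, 0.8, 0.1], [0.7, 0.2, 0.1]])     # row-wise probabilities

out = tf.reduce_sum(-tf.cast(y, tf.float32) * tf.math.log(yhat))
print(out.numpy())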
def batch_crossentropy(label, logits):
    """Calculates the cross-entropy for a batch of logits.

    Parameters
    ----------
    logits : array_like
        The logits predicted by the model for a batch of inputs.
    label : int
        The label describing the target distribution.

    Returns
    -------
    np.ndarray
        The cross-entropy between softmax(logits[i]) and onehot(label)
        for all i.

    """

    assert logits.ndim == 2

    # for numerical reasons we subtract the max logit
    # (mathematically it doesn't matter!)
    # otherwise exp(logits) might become too large or too small
    logits = logits - np.max(logits, axis=1, keepdims=True)
    e = np.exp(logits)
    s = np.sum(e, axis=1)
    ces = np.log(s) - logits[:, label]
    return ces
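A quick usage sketch, assuming the function above is in scope:

import numpy as np

logits = np.array([[2.0, 0.5, -1.0],
                   [0.1, 0.2, 0.3]])

# cross-entropy of softmax(logits[i]) against the one-hot target for class 0
print(batch_crossentropy(0, logits))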
