6 examples of 'tf.keras.metrics.auc' in Python

Every line of these 'tf.keras.metrics.auc' code snippets is scanned for vulnerabilities by our machine learning engine, which combs millions of open source libraries to help keep your Python code secure.

import tensorflow as tf

def roc_auc_score(y_pred, y_true):
    """ROC AUC Score.

    Approximates the Area Under Curve score, using an approximation based on
    the Wilcoxon-Mann-Whitney U statistic.

    Yan, L., Dodier, R., Mozer, M. C., & Wolniewicz, R. (2003).
    Optimizing Classifier Performance via an Approximation to the
    Wilcoxon-Mann-Whitney Statistic.

    Measures overall performance for the full range of threshold levels.

    Arguments:
        y_pred: `Tensor`. Predicted values.
        y_true: `Tensor`. Targets (labels), a probability distribution.
    """
    with tf.name_scope("RocAucScore"):
        # Split the predictions into positive and negative groups.
        pos = tf.boolean_mask(y_pred, tf.cast(y_true, tf.bool))
        neg = tf.boolean_mask(y_pred, ~tf.cast(y_true, tf.bool))

        # Broadcast so that every (positive, negative) pair is compared.
        pos = tf.expand_dims(pos, 0)
        neg = tf.expand_dims(neg, 1)

        # The original paper suggests performance is robust to the exact
        # parameter choice.
        gamma = 0.2
        p = 3

        # Pairwise differences shifted by the margin gamma; only pairs
        # with pos - neg < gamma incur a penalty.
        difference = tf.zeros_like(pos * neg) + pos - neg - gamma

        masked = tf.boolean_mask(difference, difference < 0.0)

        return tf.reduce_sum(tf.pow(-masked, p))
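
A minimal usage sketch, assuming TensorFlow 2.x eager execution (the tensor values below are made up for illustration):

y_true = tf.constant([0., 0., 1., 1.])
y_pred = tf.constant([0.1, 0.4, 0.35, 0.8])

# Smaller values mean fewer mis-ranked (positive, negative) pairs, so
# the score can be minimized directly as a differentiable AUC surrogate.
loss = roc_auc_score(y_pred, y_true)
print(float(loss))  # 0.015625 for these inputs
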
import sklearn.metrics

def auc_score(y_true, y_pred, positive_label=1):
    # Newer scikit-learn versions expose roc_auc_score directly.
    if hasattr(sklearn.metrics, 'roc_auc_score'):
        return sklearn.metrics.roc_auc_score(y_true, y_pred)

    # Fallback for older versions: build the ROC curve and integrate it.
    fp_rate, tp_rate, thresholds = sklearn.metrics.roc_curve(
        y_true, y_pred, pos_label=positive_label)
    return sklearn.metrics.auc(fp_rate, tp_rate)
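
For instance, with made-up labels and scores (illustrative values only):

y_true = [0, 0, 1, 1]
y_pred = [0.1, 0.4, 0.35, 0.8]
print(auc_score(y_true, y_pred))  # 0.75
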
def gini_tf_metric(y_true, y_pred):
    # tf.metrics.auc (TF1-style) returns an (auc_value, update_op) pair;
    # the normalized Gini coefficient is 2 * AUC - 1.
    return 2.0 * tf.metrics.auc(y_true, y_pred)[0] - 1.0
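
Under TensorFlow 2.x, a comparable computation could be sketched with tf.keras.metrics.AUC (a hedged equivalent, not taken from the snippet's source; the inputs are illustrative):

import tensorflow as tf

auc_metric = tf.keras.metrics.AUC()
auc_metric.update_state([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8])
gini = 2.0 * auc_metric.result() - 1.0  # Gini = 2 * AUC - 1
print(float(gini))  # approximately 0.5 (AUC is approximately 0.75)
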
def auc(self):
    # Assumes e.g. `from math import nan`, sklearn's `roc_auc_score`, and a
    # module-level logger `log` are imported elsewhere in the source file.
    if self.type != DatasetType.binary:
        # raise ValueError("AUC metric is only supported for binary classification: {}.".format(self.classes))
        log.warning("AUC metric is only supported for binary classification: %s.", self.classes)
        return nan
    # Column 1 of the probability matrix holds the positive-class scores.
    return float(roc_auc_score(self.truth, self.probabilities[:, 1]))
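
Note that scikit-learn's roc_auc_score expects scores for the positive class only in the binary case, hence the [:, 1] slice on the two-column probability matrix.
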
def _add_metrics(self):
    # Split the multi-label targets so AUC can be computed per class.
    label_split = fluid.layers.split(
        self.labels[0], self.num_classes, dim=-1)
    eval_list = []
    for index, probs in enumerate(self.outputs):
        # fluid.layers.auc returns the AUC value plus auxiliary outputs;
        # only the value itself is kept for evaluation.
        current_auc, _, _ = fluid.layers.auc(
            input=probs, label=label_split[index])
        eval_list.append(current_auc)
    return eval_list
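
The same per-class idea, sketched framework-agnostically with scikit-learn (the data below is made up; this sketch is not part of the snippet's source):

import numpy as np
from sklearn.metrics import roc_auc_score

# Multi-label targets and per-class scores (illustrative values).
y_true = np.array([[1, 0], [0, 1], [1, 1], [0, 0]])
y_score = np.array([[0.9, 0.2], [0.3, 0.8], [0.7, 0.6], [0.1, 0.4]])

# One AUC per label column, mirroring the per-class loop above.
per_class_auc = [roc_auc_score(y_true[:, k], y_score[:, k])
                 for k in range(y_true.shape[1])]
print(per_class_auc)  # [1.0, 1.0]
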
# Requires e.g. `from toolz import curry`, `import warnings`, `import pandas as pd`,
# plus `roc_auc_evaluator` and `EvalReturnType` from the same package.
@curry
def auc_evaluator(test_data: pd.DataFrame,
                  prediction_column: str = "prediction",
                  target_column: str = "target",
                  eval_name: str = None) -> EvalReturnType:
    """
    Computes the ROC AUC score, given the true label and prediction scores.

    Parameters
    ----------
    test_data : Pandas' DataFrame
        A Pandas' DataFrame with target and prediction scores.

    prediction_column : String
        The name of the column in `test_data` with the prediction scores.

    target_column : String
        The name of the column in `test_data` with the binary target.

    eval_name : String, optional (default=None)
        The name of the evaluator as it will appear in the logs.

    Returns
    -------
    log : dict
        A log-like dictionary with the ROC AUC score.
    """
    warnings.warn("The method `auc_evaluator` will be renamed to `roc_auc_evaluator` in the next major release 2.0.0."
                  " Please use `roc_auc_evaluator` instead of `auc_evaluator` for Area Under the Curve of the"
                  " Receiver Operating Characteristics curve.")

    return roc_auc_evaluator(test_data, prediction_column, target_column, eval_name)
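
A usage sketch, assuming this is fklearn's evaluator (the DataFrame values are made up, and the exact log key depends on the library version):

import pandas as pd
from fklearn.validation.evaluators import auc_evaluator

df = pd.DataFrame({"prediction": [0.1, 0.4, 0.35, 0.8],
                   "target": [0, 0, 1, 1]})

# Returns a log-like dict mapping the evaluator's name to the AUC value.
print(auc_evaluator(df))
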
