# 7 examples of 'tf.keras.metrics.auc' in Python

Every line of these 'tf.keras.metrics.auc' code snippets has been scanned for vulnerabilities by our machine-learning engine, which combs millions of open-source libraries to help ensure your Python code is secure.

## All examples are scanned by Snyk Code

By copying the Snyk Code Snippets you agree to
def roc_auc_score(y_pred, y_true):
    """Approximate ROC AUC via the Wilcoxon-Mann-Whitney U statistic.

    Follows Yan, Dodier, Mozer & Wolniewicz (2003), "Optimizing Classifier
    Performance via an Approximation to the Wilcoxon-Mann-Whitney Statistic".
    Measures overall performance across the full range of threshold levels.

    Arguments:
        y_pred: `Tensor`. Predicted values.
        y_true: `Tensor`. Targets (labels), a probability distribution.
    """
    with tf.name_scope("RocAucScore"):
        label_mask = tf.cast(y_true, tf.bool)
        positives = tf.boolean_mask(y_pred, label_mask)
        negatives = tf.boolean_mask(y_pred, ~label_mask)

        # Broadcast into an all-pairs (negative, positive) grid.
        positives = tf.expand_dims(positives, 0)
        negatives = tf.expand_dims(negatives, 1)

        # The paper reports robustness to the exact choice of gamma and p.
        gamma = 0.2
        p = 3

        pairwise = tf.zeros_like(positives * negatives) + positives - negatives - gamma

        # Penalize only pairs where the positive does not exceed the
        # negative by at least gamma.
        violations = tf.boolean_mask(pairwise, pairwise < 0.0)
        return tf.reduce_sum(tf.pow(-violations, p))
def roc_auc_score(y_pred, y_true):
    """ROC AUC score, approximated for differentiable training.

    Uses the approximation to the Wilcoxon-Mann-Whitney U statistic from
    Yan, L., Dodier, R., Mozer, M. C., & Wolniewicz, R. (2003),
    "Optimizing Classifier Performance via an Approximation to the
    Wilcoxon-Mann-Whitney Statistic". Measures overall performance for a
    full range of threshold levels.

    Arguments:
        y_pred: `Tensor`. Predicted values.
        y_true: `Tensor`. Targets (labels), a probability distribution.
    """
    with tf.name_scope("RocAucScore"):
        is_positive = tf.cast(y_true, tf.bool)

        # Split scores by class, then shape them so that subtraction
        # broadcasts into every positive/negative pairing.
        pos_scores = tf.expand_dims(tf.boolean_mask(y_pred, is_positive), 0)
        neg_scores = tf.expand_dims(tf.boolean_mask(y_pred, ~is_positive), 1)

        # Hyperparameters; the original paper suggests performance is
        # robust to the exact choice.
        gamma, p = 0.2, 3

        margin = tf.zeros_like(pos_scores * neg_scores) + pos_scores - neg_scores - gamma
        shortfall = tf.boolean_mask(margin, margin < 0.0)
        return tf.reduce_sum(tf.pow(-shortfall, p))
def auc_score(y_true, y_pred, positive_label=1):
    """Return the ROC AUC for binary targets.

    Prefers `sklearn.metrics.roc_auc_score` when this sklearn version
    provides it; otherwise integrates the ROC curve manually via
    `roc_curve` + `auc`.
    """
    if hasattr(sklearn.metrics, 'roc_auc_score'):
        return sklearn.metrics.roc_auc_score(y_true, y_pred)

    # Fallback for older sklearn releases without roc_auc_score.
    false_pos, true_pos, _thresholds = sklearn.metrics.roc_curve(
        y_true, y_pred, pos_label=positive_label)
    return sklearn.metrics.auc(false_pos, true_pos)
def gini_tf_metric(y_true, y_pred):
    """Gini coefficient derived from AUC: gini = 2 * auc - 1."""
    auc_value = tf.metrics.auc(y_true, y_pred)[0]
    return 2.0 * auc_value - 1.0
def auc(self):
    """ROC AUC on the positive-class probability column.

    Only meaningful for binary classification; for any other dataset
    type, logs a warning and returns NaN instead of raising.
    """
    if self.type != DatasetType.binary:
        log.warning("AUC metric is only supported for binary classification: %s.", self.classes)
        return nan
    # Column 1 holds the positive-class probabilities.
    return float(roc_auc_score(self.truth, self.probabilities[:, 1]))
def _add_metrics(self):
    """Build one AUC metric variable per class output.

    Splits the (multi-label) label tensor into per-class columns and
    pairs each with the matching output head's probabilities.
    """
    per_class_labels = fluid.layers.split(
        self.labels[0], self.num_classes, dim=-1)

    # One AUC per class rather than a single aggregate metric.
    metrics = []
    for class_idx, class_probs in enumerate(self.outputs):
        auc_var, _, _ = fluid.layers.auc(
            input=class_probs, label=per_class_labels[class_idx])
        metrics.append(auc_var)
    return metrics
@curry
def auc_evaluator(test_data: pd.DataFrame,
                  prediction_column: str = "prediction",
                  target_column: str = "target",
                  eval_name: str = None) -> EvalReturnType:
    """
    Computes the ROC AUC score, given true label and prediction scores.

    Deprecated alias: delegates to `roc_auc_evaluator` after emitting a
    deprecation warning.

    Parameters
    ----------
    test_data : Pandas' DataFrame
        A Pandas' DataFrame with target and prediction scores.

    prediction_column : Strings
        The name of the column in `test_data` with the prediction scores.

    target_column : String
        The name of the column in `test_data` with the binary target.

    eval_name : String, optional (default=None)
        the name of the evaluator as it will appear in the logs.

    Returns
    ----------
    log: dict
        A log-like dictionary with the ROC AUC Score
    """
    warnings.warn("The method `auc_evaluator` will be renamed to `roc_auc_evaluator` in the next major release 2.0.0."
                  " Please use `roc_auc_evaluator` instead of `auc_evaluator` for Area Under the Curve of the"
                  " Receiver Operating Characteristics curve.")

    # Forward everything unchanged to the canonical implementation.
    return roc_auc_evaluator(test_data, prediction_column, target_column, eval_name)