Every line of the 'how to calculate auc manually' code snippets below is scanned for vulnerabilities by our powerful machine learning engine, which combs millions of open source libraries to help ensure your Python code is secure.
import numpy as np

def calc_auc(x, y):
    """Given x and y values, approximate the integral with the trapezoidal
    rule and normalize it: area under the curve."""
    # NumPy 2.0 renamed np.trapz to np.trapezoid.
    integral = np.trapz(y, x)
    # Integrating a constant 1 over the same x-range gives the width of the
    # interval, so the result is the average height of the curve.
    norm = np.trapz(np.ones_like(y), x)

    return integral / norm
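A minimal usage sketch (the fpr/tpr points below are invented for illustration): for ROC points the x-range spans 0 to 1, so the normalization term is 1 and the function reduces to the plain trapezoidal AUC.

import numpy as np

fpr = np.array([0.0, 0.1, 0.4, 1.0])  # false positive rates, ascending
tpr = np.array([0.0, 0.5, 0.8, 1.0])  # matching true positive rates
print(calc_auc(fpr, tpr))  # 0.76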
# Assumes: from sklearn.metrics import roc_auc_score, a `nan` constant
# (e.g. math.nan), a module-level logger `log`, and a DatasetType enum
# defined elsewhere in the project.
def auc(self):
    if self.type != DatasetType.binary:
        # raise ValueError("AUC metric is only supported for binary classification: {}.".format(self.classes))
        log.warning("AUC metric is only supported for binary classification: %s.", self.classes)
        return nan
    # Column 1 of `probabilities` holds the positive-class probability.
    return float(roc_auc_score(self.truth, self.probabilities[:, 1]))
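roc_auc_score implements the rank-based (Mann-Whitney U) view of AUC: the probability that a randomly chosen positive example scores higher than a randomly chosen negative one, counting ties as half. A minimal hand-rolled sketch of that definition (manual_auc is an illustrative name, not part of the snippet above):

def manual_auc(y_true, scores):
    """O(P*N) pairwise AUC: fraction of positive/negative pairs ranked correctly."""
    pos = [s for s, t in zip(scores, y_true) if t == 1]
    neg = [s for s, t in zip(scores, y_true) if t == 0]
    wins = sum((p > n) + 0.5 * (p == n) for p in pos for n in neg)
    return wins / (len(pos) * len(neg))

print(manual_auc([0, 0, 1, 1], [0.1, 0.4, 0.35, 0.8]))  # 0.75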
import sklearn.metrics

def auc_score(y_true, y_pred, positive_label=1):
    # Newer scikit-learn releases expose roc_auc_score directly.
    if hasattr(sklearn.metrics, 'roc_auc_score'):
        return sklearn.metrics.roc_auc_score(y_true, y_pred)

    # Fallback for older releases: build the ROC curve and integrate it.
    fp_rate, tp_rate, thresholds = sklearn.metrics.roc_curve(
        y_true, y_pred, pos_label=positive_label)
    return sklearn.metrics.auc(fp_rate, tp_rate)
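Usage with toy data (on a modern scikit-learn the first branch runs; both paths compute the same value):

y_true = [0, 0, 1, 1]
y_pred = [0.1, 0.4, 0.35, 0.8]
print(auc_score(y_true, y_pred))  # 0.75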
# Assumes: import numpy as np, import matplotlib.pyplot as plt,
# import sklearn.metrics, from sklearn.preprocessing import label_binarize,
# and from IPython.display import display, HTML.
def plot_auc(self):
    if self.n_classes != 2:
        display("plot_auc() not yet implemented for multiclass classifiers")
        return None

    # Move binarized to classifier
    y_true_binarized = label_binarize(self.y_true, classes=self.classes)
    y_pred_binarized = 1 - self.y_pred_proba

    # Pair each binarized column with its complement so class 0 and
    # class 1 each get their own ROC curve.
    y_true_binarized = np.hstack((y_true_binarized, 1 - y_true_binarized))
    y_pred_binarized = np.hstack((y_pred_binarized, 1 - y_pred_binarized))

    fig = plt.figure()

    fpr = dict()
    tpr = dict()
    roc_auc = dict()
    for i in range(self.n_classes):
        fpr[i], tpr[i], _ = sklearn.metrics.roc_curve(
            y_true_binarized[:, i], y_pred_binarized[:, i]
        )
        roc_auc[i] = sklearn.metrics.auc(fpr[i], tpr[i])

        # return roc_auc
        self._plot_auc_label(fig, fpr[i], tpr[i], roc_auc[i], i)

    display(HTML("<h2>AUC Plot</h2>"))
    display(fig)
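The method above depends on class internals (y_pred_proba, _plot_auc_label). A self-contained sketch of the same idea for a plain binary classifier, using invented toy data and a single curve:

import matplotlib.pyplot as plt
from sklearn.metrics import auc, roc_curve

y_true = [0, 0, 1, 1]
scores = [0.1, 0.4, 0.35, 0.8]  # predicted probability of the positive class

fpr, tpr, _ = roc_curve(y_true, scores)
roc_auc = auc(fpr, tpr)

plt.plot(fpr, tpr, label=f"ROC curve (AUC = {roc_auc:.2f})")
plt.plot([0, 1], [0, 1], linestyle="--", label="chance")
plt.xlabel("False positive rate")
plt.ylabel("True positive rate")
plt.legend()
plt.show()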
def _auc_arr(score):
    # Split a two-column score array into positive-class and
    # negative-class scores.
    score_p = score[:, 0]
    score_n = score[:, 1]

    # Emit [0, 1, s] rows for positive-class scores and [1, 0, s] rows
    # for negative-class scores.
    score_arr = []
    for s in score_p.tolist():
        score_arr.append([0, 1, s])
    for s in score_n.tolist():
        score_arr.append([1, 0, s])
    return score_arr
import pandas as pd

def compute_negative_cross_auc(df, subgroup, label, model_name):
    """Computes the AUC of the within-subgroup negative examples and the background positive examples."""
    subgroup_negative_examples = df[df[subgroup] & ~df[label]]
    non_subgroup_positive_examples = df[~df[subgroup] & df[label]]
    # DataFrame.append was removed in pandas 2.0; pd.concat is the
    # drop-in replacement here.
    examples = pd.concat([subgroup_negative_examples, non_subgroup_positive_examples])
    return compute_auc(examples[label], examples[model_name])
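The compute_auc helper is defined elsewhere in the source project and is not shown in the snippet; a plausible minimal stand-in, assuming it simply wraps scikit-learn and guards against slices that contain only one class:

import numpy as np
from sklearn.metrics import roc_auc_score

def compute_auc(y_true, y_pred):
    try:
        return roc_auc_score(y_true, y_pred)
    except ValueError:
        return np.nan  # e.g. only one class present in y_true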
from sklearn.metrics import auc, precision_recall_curve, roc_auc_score

def calc_metrics(testy, scores):
    # ROC AUC directly from the scores; PR AUC by integrating the
    # precision-recall curve.
    precision, recall, _ = precision_recall_curve(testy, scores)
    roc_auc = roc_auc_score(testy, scores)
    prc_auc = auc(recall, precision)

    return roc_auc, prc_auc
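Usage with the same toy data as above:

y_true = [0, 0, 1, 1]
scores = [0.1, 0.4, 0.35, 0.8]
roc_auc, prc_auc = calc_metrics(y_true, scores)
print(roc_auc)  # 0.75
print(prc_auc)  # ≈ 0.79 (trapezoidal PR AUC)

Note that trapezoidal integration of the precision-recall curve can be optimistic; scikit-learn's average_precision_score is a common alternative for summarizing precision-recall.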