10 examples of 'sklearn metrics accuracy' in Python

Every 'sklearn metrics accuracy' code snippet below is scanned for vulnerabilities by our machine learning engine, which combs millions of open source libraries to help keep your Python code secure.

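For reference, the most direct route is sklearn.metrics.accuracy_score itself; a minimal sketch with made-up labels:

from sklearn.metrics import accuracy_score

y_true = [0, 1, 1, 0, 1]  # hypothetical ground-truth labels
y_pred = [0, 1, 0, 0, 1]  # hypothetical predictions
print(accuracy_score(y_true, y_pred))                   # 0.8 - fraction of correct predictions
print(accuracy_score(y_true, y_pred, normalize=False))  # 4 - count of correct predictions
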
import numpy as np

def test_accuracy(clf, X_test, y_test):
    # Flatten the nested lists of samples and labels into flat arrays
    X_test = np.array([item for sublist in X_test for item in sublist])
    y_test = np.array([item for sublist in y_test for item in sublist])
    # clf.score returns the mean accuracy on the given test data
    return clf.score(X_test, y_test)

from sklearn import metrics

def accuracy(y_test, y_pred):
    """Computes the accuracy score.

    Args:
        y_test: np.array, 1-D array of true class labels
        y_pred: np.array, 1-D array of predicted class labels

    Returns:
        accuracy: float
            accuracy score
    """
    return metrics.accuracy_score(y_test, y_pred)

def testAccuracy(data_test, label_test, kNN):
    # kNN is a fitted k-nearest-neighbours classifier; score returns its mean accuracy on the test set
    return kNN.score(data_test, label_test)

@unhot
def accuracy(actual, predicted):
    # unhot (a decorator, presumably converting one-hot targets to label vectors) and
    # classification_error are defined elsewhere in the source project
    return 1.0 - classification_error(actual, predicted)

import numpy as np

def accuracy(y_test, y_pred):
    return np.sum(y_test == y_pred) / len(y_test)  # fraction of exact matches

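As a quick sanity check (with made-up labels), this NumPy one-liner agrees with sklearn.metrics.accuracy_score:

import numpy as np
from sklearn.metrics import accuracy_score

y_test = np.array([1, 0, 1, 1])  # hypothetical labels
y_pred = np.array([1, 0, 0, 1])
assert accuracy(y_test, y_pred) == accuracy_score(y_test, y_pred)  # both give 0.75
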
from sklearn.ensemble import RandomForestClassifier

def accuracy(params):
    # x_train, y_train, x_test and y_test are expected to exist in the enclosing scope
    clf = RandomForestClassifier(**params)
    clf.fit(x_train, y_train)
    return clf.score(x_test, y_test)

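A function like this is typically used as the objective in a hyperparameter search; a minimal usage sketch, assuming x_train, y_train, x_test and y_test already exist and using a hypothetical parameter dictionary:

params = {"n_estimators": 100, "max_depth": 5}  # hypothetical hyperparameters
print(accuracy(params))  # test-set accuracy of a forest trained with these settings
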
def accuracy_score(self, x_test, y_test):
    # acc_func is an accuracy metric defined elsewhere in the source project
    estimation = self.model.predict(x_test)
    return acc_func(estimation, y_test)

import numpy as np
from warnings import warn

def accuracy(score_vec, label_vec, thresholds=None):
    assert len(score_vec.shape) == 1
    assert len(label_vec.shape) == 1
    assert score_vec.shape == label_vec.shape
    assert label_vec.dtype == np.bool_  # np.bool was removed in recent NumPy versions
    # find candidate thresholds from the positive scores (TAR-style)
    if thresholds is None:
        score_pos = score_vec[label_vec]
        thresholds = np.sort(score_pos)

    assert len(thresholds.shape) == 1
    if np.size(thresholds) > 10000:
        warn('number of thresholds (%d) very large, computation may take a long time!' % np.size(thresholds))

    # Loop computation: accuracy at each candidate threshold
    accuracies = np.zeros(np.size(thresholds))
    for i, threshold in enumerate(thresholds):
        pred_vec = score_vec >= threshold
        accuracies[i] = np.mean(pred_vec == label_vec)

    # Matrix computation (equivalent, vectorised): each column is a threshold
    # predictions = score_vec[:, None] >= thresholds[None, :]
    # accuracies = np.mean(predictions == label_vec[:, None], axis=0)

    # Best accuracy, and the mean of the thresholds that achieve it
    argmax = np.argmax(accuracies)
    accuracy = accuracies[argmax]
    threshold = np.mean(thresholds[accuracies == accuracy])

    return accuracy, threshold

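A usage sketch with synthetic scores and boolean labels (values made up for illustration):

import numpy as np

scores = np.array([0.1, 0.4, 0.35, 0.8, 0.7])        # hypothetical match scores
labels = np.array([False, False, True, True, True])  # boolean ground truth
best_acc, best_thr = accuracy(scores, labels)
print(best_acc, best_thr)  # best achievable accuracy and the threshold that attains it
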
import numpy as np
from scipy.optimize import linear_sum_assignment
from sklearn.metrics import adjusted_mutual_info_score, normalized_mutual_info_score

def cluster_acc(y_true, y_pred):
    """
    Calculates the accuracy of the clustering.
    Since the index of each cluster might differ between y_true and y_pred, this function finds the linear
    assignment that maximizes the accuracy. This means some clusters might remain without a matching label.
    :param y_true: ground-truth labeling
    :param y_pred: labels calculated by the model
    :return: the accuracy percentage, ami, nmi, the matrix w of all combinations of original and
             calculated cluster indices, and the remapped predictions
    """
    assert y_pred.size == y_true.size
    y_true_unique = np.unique(y_true)
    true_cluster_idx = np.nonzero(y_true[:, None] == y_true_unique)[1]
    D = max(y_pred.max() + 1, len(y_true_unique))  # number of clusters
    w = np.zeros((D, len(y_true_unique)), dtype=np.int64)  # contingency matrix, clusters x clusters
    for i in range(y_pred.size):
        w[y_pred[i], true_cluster_idx[i]] += 1
    # sklearn's removed linear_assignment is replaced here by scipy's linear_sum_assignment
    ind = np.array(linear_sum_assignment(w.max() - w)).T
    # map each predicted cluster to its best-matching ground-truth label; since there are usually
    # many clusters, the ones with no corresponding ground-truth value receive -1
    y_pred_new = -1 * np.ones(len(y_pred), int)
    for i in range(len(y_pred)):
        j = np.argwhere(ind[:, 0] == y_pred[i])
        if j.shape[0] > 0:
            y_pred_new[i] = ind[j[0, 0], 1]
    acc = sum([w[i, j] for i, j in ind]) * 1.0 / y_pred.size
    ami = adjusted_mutual_info_score(y_true, y_pred)
    nmi = normalized_mutual_info_score(y_true, y_pred)
    return acc, ami, nmi, w, y_pred_new

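An illustrative call with toy cluster assignments (values made up); the predicted partition matches the ground truth up to a relabeling, so the accuracy is 1.0:

import numpy as np

y_true = np.array([0, 0, 1, 1, 2, 2])  # hypothetical ground-truth clusters
y_pred = np.array([1, 1, 0, 0, 2, 2])  # same partition with permuted cluster ids
acc, ami, nmi, w, y_pred_new = cluster_acc(y_true, y_pred)
print(acc, y_pred_new)  # 1.0, and the predictions remapped onto the ground-truth labels
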
import torch

@torch.no_grad()
def compute_accuracy_classifier(clf, data_train, labels_train, data_test, labels_test):
    clf.fit(data_train, labels_train)
    # Predicting the labels
    y_pred_test = clf.predict(data_test)
    y_pred_train = clf.predict(data_train)

    # compute_accuracy_tuple is a helper defined elsewhere in the source project
    return (
        (
            compute_accuracy_tuple(labels_train, y_pred_train),
            compute_accuracy_tuple(labels_test, y_pred_test),
        ),
        y_pred_test,
    )
