4 examples of 'scikit learn classification report' in Python

Every line of these 'scikit learn classification report' code snippets is scanned for vulnerabilities by our machine learning engine, which combs millions of open source libraries to help keep your Python code secure.

def runClassReport(clf, A, Cl):
    # Print scikit-learn's per-class report for a fitted classifier clf on features A, labels Cl.
    from sklearn.metrics import classification_report
    y_pred = clf.predict(A)
    print(classification_report(Cl, y_pred, target_names=clf.classes_))
    print(' Precision is the probability that, given a classification result for a sample,\n' +
          ' the sample actually belongs to that class. Recall is the probability that a\n' +
          ' sample of a given class will be correctly classified. The f1-score combines\n' +
          ' precision and recall into a single measure of the relevance of the classifier results.\n')
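
For readers unfamiliar with the report itself, here is a minimal self-contained sketch of the call this snippet wraps; the toy dataset, the LogisticRegression model, and the class names are illustrative assumptions, not part of the snippet above.

# Minimal usage sketch of classification_report (illustrative assumptions throughout).
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=200, n_features=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

clf = LogisticRegression().fit(X_train, y_train)
y_pred = clf.predict(X_test)

# One row per class with precision, recall, f1-score and support.
print(classification_report(y_test, y_pred, target_names=['class 0', 'class 1']))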
# numpy and the scikit-learn ensembles are used directly; ClassifiersFactory and
# LabeledDataStorage come from the REP library, while generate_classification_sample,
# _classification_mask_report and check_classification_learning_curve_masks are
# helpers defined elsewhere in this test module.
import numpy
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, RandomForestClassifier

def _check_classification_report(n_classes=2):
    # Fit three ensemble classifiers on a synthetic sample via REP's factory.
    classifiers = ClassifiersFactory()
    classifiers.add_classifier('gb', GradientBoostingClassifier(n_estimators=10))
    classifiers.add_classifier('rf', RandomForestClassifier())
    classifiers.add_classifier('ada', AdaBoostClassifier(n_estimators=10))

    X, y = generate_classification_sample(1000, 5, n_classes=n_classes)
    classifiers.fit(X, y)

    # Evaluate on an independent sample drawn the same way.
    X, y = generate_classification_sample(1000, 5, n_classes=n_classes)
    test_lds = LabeledDataStorage(X, y, sample_weight=None)
    report = classifiers.test_on_lds(test_lds)

    # Exercise the report with a string cut, a callable mask, and no mask at all.
    val = numpy.mean(X['column0'])
    labels_dict = None
    if n_classes > 2:
        labels_dict = {i: str(i) for i in range(n_classes)}
    _classification_mask_report(report, "column0 > %f" % val, X, labels_dict)
    _classification_mask_report(report, lambda x: numpy.array(x['column0']) < val, X, labels_dict)
    _classification_mask_report(report, None, X, labels_dict)
    check_classification_learning_curve_masks(report, n_classes=n_classes)
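
The snippet above depends on REP's ClassifiersFactory and on test helpers that are not shown. A rough sketch of the same idea in plain scikit-learn, with the data generator and train/test split as assumptions, might look like this:

# Sketch: fit several ensemble classifiers on the same data and report each,
# standing in for REP's ClassifiersFactory (all data choices here are assumptions).
from sklearn.datasets import make_classification
from sklearn.ensemble import AdaBoostClassifier, GradientBoostingClassifier, RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=1000, n_features=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

classifiers = {
    'gb': GradientBoostingClassifier(n_estimators=10),
    'rf': RandomForestClassifier(),
    'ada': AdaBoostClassifier(n_estimators=10),
}
for name, clf in classifiers.items():
    clf.fit(X_train, y_train)
    print(name)
    print(classification_report(y_test, clf.predict(X_test)))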
from sklearn import metrics
import matplotlib.pyplot as plt

def report(test_Y, pred_Y):
    # Print the standard binary-classification metrics for true labels vs. predictions.
    print("accuracy_score:")
    print(metrics.accuracy_score(test_Y, pred_Y))
    print("f1_score:")
    print(metrics.f1_score(test_Y, pred_Y))
    print("recall_score:")
    print(metrics.recall_score(test_Y, pred_Y))
    print("precision_score:")
    print(metrics.precision_score(test_Y, pred_Y))
    print("confusion_matrix:")
    print(metrics.confusion_matrix(test_Y, pred_Y))
    print("AUC:")
    print(metrics.roc_auc_score(test_Y, pred_Y))

    # Plot the ROC curve; with hard 0/1 predictions it reduces to a single operating point.
    f_pos, t_pos, thresh = metrics.roc_curve(test_Y, pred_Y)
    auc_area = metrics.auc(f_pos, t_pos)
    plt.plot(f_pos, t_pos, 'darkorange', lw=2, label='AUC = %.2f' % auc_area)
    plt.legend(loc='lower right')
    plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
    plt.title('ROC')
    plt.ylabel('True Pos Rate')
    plt.xlabel('False Pos Rate')
    plt.show()
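
One caveat: roc_curve expects continuous scores, so passing hard 0/1 predictions as above collapses the curve to a single operating point. A minimal sketch using positive-class probabilities instead (the dataset and model are assumptions):

# Sketch: ROC from probability scores rather than hard labels.
from sklearn import metrics
from sklearn.datasets import make_classification
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import train_test_split

X, y = make_classification(n_samples=500, n_features=5, random_state=0)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)
clf = LogisticRegression().fit(X_train, y_train)

# Use the positive-class probability as the score that roc_curve thresholds.
scores = clf.predict_proba(X_test)[:, 1]
f_pos, t_pos, thresh = metrics.roc_curve(y_test, scores)
print('AUC: %.2f' % metrics.auc(f_pos, t_pos))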
def test_classification_report():
    # Smoke test: run the binary case of the helper defined above.
    _check_classification_report(n_classes=2)
