Every line of the 'xgbclassifier python' code snippets below is scanned for vulnerabilities by our machine learning engine, which is trained on millions of open source libraries, to help keep your Python code secure.
```python
import xgboost as xgb

def run_xgb(train_X, test_X, train_y, test_y, depth=6, a=0.0, l=1.5, seed=0):
    param = {'max_depth': depth, 'eta': 0.3, 'silent': 1,
             'objective': 'binary:logistic', 'eval_metric': ['auc', 'error'],
             'alpha': a, 'lambda': l}
    if seed != 0:  # specific random seed entered
        param['seed'] = seed
    param['colsample_bytree'] = 0.5
    param['colsample_bylevel'] = 0.5
    train_xgb = xgb.DMatrix(train_X, label=train_y)
    test_xgb = xgb.DMatrix(test_X, label=test_y)
    # 'num_round' inside the params dict is silently ignored by the Python API;
    # the number of boosting rounds must be passed to xgb.train() directly.
    bst = xgb.train(param, train_xgb, num_boost_round=20)
    ypred = bst.predict(test_xgb)
    metrics = gen_eval_metrics(test_y, ypred)  # project-specific helper
    accuracy = metrics[0]

    print('Fold accuracy: ' + str(accuracy))
    return metrics
```
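To try `run_xgb` in isolation, a call might look like the sketch below. The scikit-learn dataset and split are assumptions for illustration, and `gen_eval_metrics` is a helper defined elsewhere in the source project that is assumed to return accuracy as its first element.

```python
# Minimal usage sketch (not from the original project): build a binary
# classification split and run one fold through run_xgb.
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split

X, y = load_breast_cancer(return_X_y=True)
train_X, test_X, train_y, test_y = train_test_split(
    X, y, test_size=0.2, random_state=42)
metrics = run_xgb(train_X, test_X, train_y, test_y, depth=4, seed=42)
```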
```python
def test_xgboost_multiclass():
    try:
        import xgboost
    except Exception:
        print("Skipping test_xgboost_multiclass!")
        return
    import shap

    # train XGBoost model; note that XGBClassifier overrides the requested
    # 'binary:logistic' objective with a multiclass one, since the iris
    # labels contain three classes
    X, Y = shap.datasets.iris()
    model = xgboost.XGBClassifier(objective="binary:logistic", max_depth=4)
    model.fit(X, Y)

    # explain the model's predictions using SHAP values (use pred_contrib in LightGBM)
    shap_values = shap.TreeExplainer(model).shap_values(X)

    # ensure plot works for first class
    shap.dependence_plot(0, shap_values[0], X, show=False)
```
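In the shap versions this test targets, `TreeExplainer.shap_values()` returns a list for multiclass models, with one `(n_samples, n_features)` array per class, which is why the test indexes `shap_values[0]`. Continuing from the snippet above, the illustrative loop below (not part of the shap test suite) plots the same dependence for every class; it assumes `shap`, `X`, and `shap_values` are the names defined in the test.

```python
# Sketch: one dependence plot per class of the multiclass model,
# reusing shap, X, and shap_values from test_xgboost_multiclass above.
for class_idx, class_shap in enumerate(shap_values):
    shap.dependence_plot(0, class_shap, X, show=False)
```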
```python
def train_model(self, train_x, train_y):
    """
    Fit an XGBoost classifier on the feature vectors and persist it to disk.
    :param train_x: training feature vectors
    :param train_y: training labels
    :return: None
    """
    self.clf = xgb.XGBClassifier()
    self.clf.fit(train_x, train_y, eval_metric=self.eval_metric,
                 eval_set=[(train_x, train_y)])
    self.init = True
    evals_result = self.clf.evals_result()
    print('evals_result:', evals_result)
    # the third positional argument to pickle.dump is the protocol;
    # True is equivalent to protocol 1
    with open(self.xgb_model_name, 'wb') as f:
        pickle.dump(self.clf, f, True)
```
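A matching load-and-predict path is not shown in the snippet. The sketch below is a hypothetical counterpart method, not code from the original project; it assumes the same `init`, `clf`, and `xgb_model_name` attributes used by `train_model`.

```python
import pickle

# Hypothetical companion method for the same class as train_model above:
# lazily restore the pickled classifier, then delegate to its predict().
def predict_model(self, test_x):
    if not self.init:
        with open(self.xgb_model_name, 'rb') as f:
            self.clf = pickle.load(f)
        self.init = True
    return self.clf.predict(test_x)
```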