Let's set up regression with XGBoost.
For XGBoost itself, please refer to other pages, for example "What is XGBoost? & A memo about the main parameters":
https://qiita.com/2357gi/items/913af8b813b069617aad
After that, I referred to the parameter list on the official website: https://xgboost.readthedocs.io/en/latest/parameter.html
I tried putting in various parameters, but tree-based models tend to overfit, so I think it is better to firmly set the parameters that control overfitting. In XGBoost the regularization parameters are called `lambda` and `alpha`, but when setting them from the Python scikit-learn API you specify them with the `reg_` prefix, as `reg_lambda` and `reg_alpha`.
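As a minimal sketch of this naming difference (the values here are just placeholders, not from this article): the native API takes `lambda`/`alpha` as keys in a params dict, while the scikit-learn wrapper uses the `reg_` names.

import xgboost as xgb

# Native API (xgb.train): regularization keys are 'lambda' and 'alpha'
params = {'lambda': 1.0, 'alpha': 0.0}
# scikit-learn wrapper: the same parameters get a reg_ prefix
regr = xgb.XGBRegressor(reg_lambda=1.0, reg_alpha=0.0)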
# Set the objective function for optuna
# These are parameter settings for the gbtree booster.
def objective(trial):
    eta = trial.suggest_loguniform('eta', 1e-8, 1.0)
    gamma = trial.suggest_loguniform('gamma', 1e-8, 1.0)
    max_depth = trial.suggest_int('max_depth', 1, 20)
    min_child_weight = trial.suggest_loguniform('min_child_weight', 1e-8, 1.0)
    max_delta_step = trial.suggest_loguniform('max_delta_step', 1e-8, 1.0)
    subsample = trial.suggest_uniform('subsample', 0.0, 1.0)
    reg_lambda = trial.suggest_uniform('reg_lambda', 0.0, 1000.0)
    reg_alpha = trial.suggest_uniform('reg_alpha', 0.0, 1000.0)
    regr = xgb.XGBRegressor(eta=eta, gamma=gamma, max_depth=max_depth,
                            min_child_weight=min_child_weight, max_delta_step=max_delta_step,
                            subsample=subsample, reg_lambda=reg_lambda, reg_alpha=reg_alpha)
    # Evaluate with 5-fold cross-validation and return the mean R^2
    score = cross_val_score(regr, X_train, y_train, cv=5, scoring="r2")
    r2_mean = score.mean()
    print(r2_mean)
    return r2_mean
# Search for the optimal values with optuna
study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=500)

# Fit with the tuned hyperparameters
optimised_rf = xgb.XGBRegressor(eta=study.best_params['eta'], gamma=study.best_params['gamma'],
                                max_depth=study.best_params['max_depth'], min_child_weight=study.best_params['min_child_weight'],
                                max_delta_step=study.best_params['max_delta_step'], subsample=study.best_params['subsample'],
                                reg_lambda=study.best_params['reg_lambda'], reg_alpha=study.best_params['reg_alpha'])
optimised_rf.fit(X_train, y_train)
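As a side note, because the keys of `study.best_params` match the `XGBRegressor` argument names used above, the same model can also be built by unpacking the dictionary directly (an equivalent sketch, not in the original code):

# Equivalent: unpack the tuned parameters straight into the constructor
optimised_rf = xgb.XGBRegressor(**study.best_params)
optimised_rf.fit(X_train, y_train)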
This is the result for the Boston dataset. The full code is below.
# -*- coding: utf-8 -*-
from sklearn import datasets
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import r2_score
import xgboost as xgb
import optuna
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Load the Boston dataset
boston = datasets.load_boston()
#print(boston['feature_names'])

# Separate the features from the target variable
X = boston['data']
y = boston['target']
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8)
# Set the objective function for optuna
# These are parameter settings for the gbtree booster.
def objective(trial):
    eta = trial.suggest_loguniform('eta', 1e-8, 1.0)
    gamma = trial.suggest_loguniform('gamma', 1e-8, 1.0)
    max_depth = trial.suggest_int('max_depth', 1, 20)
    min_child_weight = trial.suggest_loguniform('min_child_weight', 1e-8, 1.0)
    max_delta_step = trial.suggest_loguniform('max_delta_step', 1e-8, 1.0)
    subsample = trial.suggest_uniform('subsample', 0.0, 1.0)
    reg_lambda = trial.suggest_uniform('reg_lambda', 0.0, 1000.0)
    reg_alpha = trial.suggest_uniform('reg_alpha', 0.0, 1000.0)
    regr = xgb.XGBRegressor(eta=eta, gamma=gamma, max_depth=max_depth,
                            min_child_weight=min_child_weight, max_delta_step=max_delta_step,
                            subsample=subsample, reg_lambda=reg_lambda, reg_alpha=reg_alpha)
    # Evaluate with 5-fold cross-validation and return the mean R^2
    score = cross_val_score(regr, X_train, y_train, cv=5, scoring="r2")
    r2_mean = score.mean()
    print(r2_mean)
    return r2_mean
# Search for the optimal values with optuna
study = optuna.create_study(direction='maximize')
study.optimize(objective, n_trials=500)

# Fit with the tuned hyperparameters
optimised_rf = xgb.XGBRegressor(eta=study.best_params['eta'], gamma=study.best_params['gamma'],
                                max_depth=study.best_params['max_depth'], min_child_weight=study.best_params['min_child_weight'],
                                max_delta_step=study.best_params['max_delta_step'], subsample=study.best_params['subsample'],
                                reg_lambda=study.best_params['reg_lambda'], reg_alpha=study.best_params['reg_alpha'])
optimised_rf.fit(X_train, y_train)
# View the results (score() returns R^2 for a regressor)
print("Fit to the training data")
print("Training data R^2 =", optimised_rf.score(X_train, y_train))
pre_train = optimised_rf.predict(X_train)
print("Fit to the test data")
print("Test data R^2 =", optimised_rf.score(X_test, y_test))
pre_test = optimised_rf.predict(X_test)
# Plot the results (single colors are set with color=, not cmap=)
plt.scatter(y_train, pre_train, marker='o', color="blue", label="train")
plt.scatter(y_test, pre_test, marker='o', color="red", label="test")
plt.title('boston')
plt.xlabel('measured')
plt.ylabel('predicted')
# Adjust the position of the score text here
x = 30
y1 = 12
y2 = 10
s1 = "train_r2 =" + str(optimised_rf.score(X_train, y_train))
s2 = "test_r2 =" + str(optimised_rf.score(X_test, y_test))
plt.text(x, y1, s1)
plt.text(x, y2, s2)
plt.legend(loc="upper left", fontsize=14)
plt.show()
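One possible refinement (my own suggestion, not in the original article): fixing the random seeds of the split and of the sampler makes the whole tuning run reproducible.

# Hypothetical tweak: seed the split and optuna's TPE sampler for reproducibility
X_train, X_test, y_train, y_test = train_test_split(X, y, train_size=0.8, random_state=0)
study = optuna.create_study(direction='maximize',
                            sampler=optuna.samplers.TPESampler(seed=0))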