XGBoost Regression
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston  # note: removed in scikit-learn >= 1.2, requires an older version
import matplotlib.pyplot as plt
# Load the dataset
boston = load_boston()
# Extract features and target
x, y = boston.data, boston.target
# Feature names
feature_name = boston.feature_names
# Split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
# Training parameters
params = {
    # General parameters
    'booster': 'gbtree',  # weak learner: gbtree (default, tree-based boosting) or
                          # gblinear (linear-model boosting)
    # Task parameters
    'objective': 'reg:gamma',  # regression loss function: gamma regression
    # Booster parameters
    'gamma': 0.1,              # minimum loss reduction required to split a leaf node
    'max_depth': 5,            # maximum tree depth (XGBoost's default is 6)
    'lambda': 3,               # L2 regularization weight
    'subsample': 0.7,          # fraction of samples used per tree, guards against overfitting
    'colsample_bytree': 0.7,   # fraction of features sampled when building each tree
    'min_child_weight': 3,     # minimum sum of instance weights required in a child node
    'eta': 0.1,                # shrinkage step size used in the additive model
    'seed': 1000,              # random seed
    'nthread': 4,              # number of threads
}
# Convert to XGBoost's DMatrix format
dtrain = xgb.DMatrix(x_train, y_train, feature_names=feature_name)
dtest = xgb.DMatrix(x_test, feature_names=feature_name)
# Train the model
num_rounds = 30
model = xgb.train(params, dtrain, num_rounds)
# Predict on the test set
y_pred = model.predict(dtest)
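# A minimal evaluation sketch (not in the original listing): score the
# gamma-regression predictions against the held-out targets with RMSE.
from sklearn.metrics import mean_squared_error
rmse = mean_squared_error(y_test, y_pred) ** 0.5
print("RMSE: %.4f" % rmse)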
# Plot feature importances
plot_importance(model, importance_type="weight")
plt.show()
# Visualize one of the trees; num_trees is the tree index
plot_tree(model, num_trees=17)
plt.show()
# Dump the base learners to a text file
model.dump_model('model2.txt')
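Note that dump_model writes a human-readable description for inspection only; it cannot be loaded back. A minimal sketch of persisting a reusable model with save_model/load_model, assuming the hypothetical file name model2.json:
model.save_model('model2.json')  # hypothetical file name, not in the original
loaded = xgb.Booster()
loaded.load_model('model2.json')
print(loaded.predict(dtest)[:5])  # should match y_pred[:5]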
sklearn-XGBoost Regression
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.model_selection import train_test_split
from sklearn.datasets import load_boston
import matplotlib.pyplot as plt
# Load the dataset
boston = load_boston()
# Extract features and target
x, y = boston.data, boston.target
# Feature names
feature_name = boston.feature_names
# Split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
# Train the model via the scikit-learn wrapper
model = xgb.XGBRegressor(max_depth=5, learning_rate=0.1, n_estimators=50, objective='reg:gamma')
model.fit(x_train, y_train)
# Predict on the test set
y_pred = model.predict(x_test)
# Plot feature importances
plot_importance(model)
plt.show()
# Visualize one of the trees; num_trees is the tree index
plot_tree(model, num_trees=17)
plt.show()
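As a quick sanity check (a sketch, not part of the original listing), the scikit-learn wrapper inherits the standard score method, which reports R^2 on held-out data:
print("R^2 on test set: %.4f" % model.score(x_test, y_test))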
XGBoost Classification
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# Load the dataset
iris = load_iris()
x, y = iris.data, iris.target
# Split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=1234565)
# Training parameters
params = {
    # General parameters
    'booster': 'gbtree',  # weak learner: gbtree (default, tree-based boosting) or
                          # gblinear (linear-model boosting)
    'nthread': 4,         # number of threads; defaults to the maximum available on the system
    'verbosity': 2,       # 0 = silent, 1 = warnings, 2 = info, 3 = debug
    'num_feature': 4,     # number of feature dimensions used in boosting
    'seed': 1000,         # random seed
    # Task parameters
    'objective': 'multi:softmax',  # softmax multi-class classification; objective defines the
                                   # learning task and its loss function
    'num_class': 3,       # number of classes
    # Booster parameters
    'gamma': 0.1,              # minimum loss reduction required to split a leaf node
    'max_depth': 6,            # maximum tree depth (default is 6)
    'lambda': 2,               # L2 regularization weight
    'subsample': 0.7,          # fraction of samples used per tree, guards against overfitting
    'colsample_bytree': 0.7,   # fraction of features sampled when building each tree
    'min_child_weight': 3,     # minimum sum of instance weights required in a child node
    'eta': 0.1                 # shrinkage step size used in the additive model
}
# Convert to XGBoost's DMatrix format
dtrain = xgb.DMatrix(x_train, y_train)
dtest = xgb.DMatrix(x_test)
# Number of boosting rounds; for multi-class softmax each round builds one tree per class,
# so the total number of base learners = num_rounds * num_class
num_rounds = 50
model = xgb.train(params, dtrain, num_rounds)  # train the XGBoost model
# Predict on the test set
y_pred = model.predict(dtest)
# Compute accuracy
accuracy = accuracy_score(y_test, y_pred)
print("accuracy: %.2f%%" % (accuracy * 100.0))
# Plot feature importances
plot_importance(model)
plt.show()
# Visualize one of the trees; num_trees is the tree index
plot_tree(model, num_trees=5)
plt.show()
# Dump the base learners to a text file
model.dump_model('model.txt')
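To verify the rounds-times-classes relationship concretely, one can count the dumped base learners (a sketch, not in the original); with 50 rounds and 3 classes this should print 150:
print(len(model.get_dump()))  # expected: 50 rounds * 3 classes = 150 trees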
sklearn-XGBoost Classification
import xgboost as xgb
from xgboost import plot_importance, plot_tree
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import pandas as pd
import matplotlib.pyplot as plt
iris = load_iris()
feature_name = iris.feature_names
# Wrap the features in a DataFrame so the fitted model keeps the feature names for plotting
x = pd.DataFrame(iris.data, columns=feature_name)
y = iris.target
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=3)
# Train the model via the scikit-learn wrapper
model = xgb.XGBClassifier(max_depth=5, n_estimators=50, objective='multi:softmax')
model.fit(x_train, y_train)
# Predict on the test set
y_pred = model.predict(x_test)
# Compute accuracy
accuracy = accuracy_score(y_test, y_pred)
print("accuracy: %.2f%%" % (accuracy * 100.0))
# Plot feature importances
plot_importance(model)
plt.show()
# Visualize one of the trees; num_trees is the tree index
plot_tree(model, num_trees=5)
plt.show()
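Beyond overall accuracy, a per-class breakdown can be informative. A minimal sketch (not in the original) using scikit-learn's confusion_matrix:
from sklearn.metrics import confusion_matrix
print(confusion_matrix(y_test, y_pred))  # rows are true classes, columns are predicted classes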
LightGBM
import lightgbm as lgb
from sklearn.datasets import load_breast_cancer
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
# Load the dataset
breast = load_breast_cancer()
# Extract features and target
x, y = breast.data, breast.target
# Feature names
feature_name = breast.feature_names
# Split into training and test sets
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
# Convert to LightGBM's Dataset format
lgb_train = lgb.Dataset(x_train, y_train)
lgb_eval = lgb.Dataset(x_test, y_test, reference=lgb_train)
# Parameter settings
boost_round = 50        # number of boosting rounds
early_stop_rounds = 10  # stop early if the validation metric has not improved for this many rounds
params = {
    'boosting_type': 'gbdt',    # boosting type
    'objective': 'regression',  # objective function
    'metric': ['l2', 'auc'],    # evaluation metrics
    'num_leaves': 31,           # number of leaves per tree
    'learning_rate': 0.05,      # learning rate
    'feature_fraction': 0.9,    # fraction of features sampled when building each tree
    'bagging_fraction': 0.8,    # fraction of samples used for bagging
    'bagging_freq': 5,          # k means bagging is performed every k iterations
    'verbose': 1                # <0: fatal only, =0: errors/warnings, >0: info
}
# Train the model with early stopping
results = {}
gbm = lgb.train(
    params,
    lgb_train,
    num_boost_round=boost_round,
    valid_sets=(lgb_eval, lgb_train),
    valid_names=('validate', 'train'),
    callbacks=[
        lgb.early_stopping(early_stop_rounds),  # early stopping is a callback in lightgbm >= 4.0
        lgb.record_evaluation(results),         # records per-round metrics into results
    ],
)
# Predict with the best iteration found by early stopping
y_pred = gbm.predict(x_test, num_iteration=gbm.best_iteration)
print(y_pred)
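# Because the objective is 'regression' on a binary target, predict() returns
# continuous scores rather than class labels. A minimal sketch (the 0.5 cutoff
# is an assumption, not in the original): threshold the scores so the imported
# accuracy_score can be used.
y_label = (y_pred >= 0.5).astype(int)
print("accuracy: %.2f%%" % (accuracy_score(y_test, y_label) * 100.0))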
# Plot one of the recorded metrics (l2) over the boosting rounds
lgb.plot_metric(results, metric='l2')
plt.show()
# Plot feature importances
lgb.plot_importance(gbm, importance_type='split')
plt.show()