This post is a worked example; the accompanying methodology is described at:
https://blog.csdn.net/weixin_45520028/article/details/108977748
Environment: Python 3.7
Platform: Jupyter
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from pylab import mpl
import xgboost as xgb
from xgboost import XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import LabelEncoder
from sklearn import metrics
import shap
import warnings
warnings.filterwarnings("ignore")
pd.set_option('display.max_columns', 40)
plt.rcParams['font.sans-serif'] = ['Arial Unicode MS']  # font that can render Chinese labels
# In a notebook environment, load the JS code SHAP needs for interactive plots
shap.initjs()
# Load data ================================================================
df1 = pd.read_csv("./data_v4_null_0.csv", encoding='gbk')
df2 = pd.read_csv("./data_v4_null_1.csv", encoding='gbk')
df3 = pd.read_csv("./data_v4_null_2.csv", encoding='gbk')
frames = [df1, df2, df3]
data = pd.concat(frames, ignore_index=True, axis=0)
# Data cleaning ============================================================
# 'pinche_poten' is entirely null and therefore unusable
data.drop(['pinche_poten'], axis=1, inplace=True)
# Drop the single outlier with distance over 1000 km
data = data[data['distance'] < 1000]
# Drop rows where any of these feature fields is NaN
data = data.dropna(subset=['sub_sens', 'ord_sub_sens', 'life_stage'])
# Also drop rows where the label th_type is NaN
data = data.dropna(subset=['th_type'])
data = data.fillna(0)
data = data.reset_index(drop=True)
# XGBoost requires numeric inputs, so convert the categorical fields first ====
data_deal = data[['sub_sens', 'ord_sub_sens', 'life_stage', 'scene_l1', 'time_interval']].copy()
# Cast everything to str first so LabelEncoder sees uniform types
data_deal = data_deal.astype(str)
# Encode the strings as integers
features = []
feature_tables = []
for i in range(data_deal.shape[1]):
    label_encoder = LabelEncoder()
    feature = label_encoder.fit_transform(data_deal.iloc[:, i])
    # inverse_transform maps the integer codes back to the original labels
    feature_table = label_encoder.inverse_transform(feature)
    features.append(feature)
    # keep (encoded, original) column pairs for the lookup tables built below
    feature_tables.append(feature)
    feature_tables.append(feature_table)
data_deal2 = np.array(features)
data_deal2 = data_deal2.reshape(data_deal.shape[0], data_deal.shape[1])
data_deal3 = pd.DataFrame(data_deal2)
# Code-to-label lookup table
feature_tables = pd.DataFrame(np.array(feature_tables)).T
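# A minimal round-trip demonstration of the encoding above (toy values, not
# real 'sub_sens' data), showing how the lookup tables were produced:
le = LabelEncoder()
codes = le.fit_transform(['high', 'low', 'low', 'mid'])
print(codes)                        # [0 1 1 2]
print(le.inverse_transform(codes))  # ['high' 'low' 'low' 'mid']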
# Inspect the value counts of each original field
feature_tables.iloc[:, 1].value_counts()
feature_tables.iloc[:, 3].value_counts()
feature_tables.iloc[:, 5].value_counts()
feature_tables.iloc[:, 7].value_counts()
feature_tables.iloc[:, 9].value_counts()
# sub_sens lookup table
fea_sub_sens = feature_tables.iloc[:, :2].drop_duplicates().reset_index(drop=True).sort_values(by=1)
# ord_sub_sens lookup table
fea_ord_sub_sens = feature_tables.iloc[:, 2:4].drop_duplicates().reset_index(drop=True).sort_values(by=3)
# life_stage lookup table
fea_life_stage = feature_tables.iloc[:, 4:6].drop_duplicates().reset_index(drop=True).sort_values(by=5)
# scene_l1 lookup table
fea_scene = feature_tables.iloc[:, 6:8].drop_duplicates().reset_index(drop=True).sort_values(by=7)
# time_interval lookup table
fea_interval = feature_tables.iloc[:, 8:10].drop_duplicates().reset_index(drop=True).sort_values(by=9)
# Write the encoded 'sub_sens', 'ord_sub_sens', 'life_stage', 'scene_l1', 'time_interval' columns back into data
data['sub_sens'] = data_deal3.iloc[:, 0]
data['ord_sub_sens'] = data_deal3.iloc[:, 1]
data['life_stage'] = data_deal3.iloc[:, 2]
data['scene_l1'] = data_deal3.iloc[:, 3]
data['time_interval'] = data_deal3.iloc[:, 4]
# Sample an equal number of rows from each output class =======================
data_s1 = data[data['th_type'] == 1].sample(n=650000, random_state=1)
data_s2 = data[data['th_type'] == 2].sample(n=650000, random_state=1)
data_s0 = data[data['th_type'] == 0].sample(n=650000, random_state=1)
# Concatenate into the balanced dataset
data = pd.concat([data_s1, data_s2, data_s0], ignore_index=True, axis=0)
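# Quick sanity check that the three classes are now balanced
data['th_type'].value_counts()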
# Add a column of random integers as a sanity check on feature importance (features ranked below the random column can be ignored)
data['随机数'] = np.random.randint(0, 4, size = len(data))
# Preview data
data
# Figure 1: preview of the balanced dataset
# Prepare data for XGBoost modelling ======================================
# Column 27 holds the target
data_result = data.iloc[:, 27]
# Columns 1 through 26 are the features
# (note: if the '随机数' column was appended after the target column, extend this slice so it actually takes part in the importance check)
data_input = data.iloc[:, 1:27]
# Build the training and test sets
train_x, test_x, train_y, test_y = train_test_split(data_input, data_result, test_size=0.01, random_state=1)
# Shapes of the train/test feature matrices
print(train_x.shape, test_x.shape)
# Class counts in the training labels
train_y.value_counts()
# XGBoost: multi-class classification ========================================
dtrain = xgb.DMatrix(train_x, label=train_y)
dtest = xgb.DMatrix(test_x)
# Parameters
params = {'booster': 'gbtree',
          'objective': 'multi:softmax',  # 'multi:softmax' returns the predicted class (not probabilities); 'multi:softprob' returns per-class probabilities
          'num_class': 3,
          'eval_metric': 'merror',  # use 'auc' for binary tasks; 'mlogloss' or 'merror' for multi-class
          'max_depth': 7,
          'lambda': 15,
          'subsample': 0.75,
          'colsample_bytree': 0.75,
          'min_child_weight': 1,
          'eta': 0.025,  # note: 'learning_rate' is an alias of 'eta', so setting both is redundant
          'seed': 0,
          'nthread': 8,
          'silent': 1,
          'gamma': 0.15,
          'learning_rate': 0.01}
watchlist = [(dtrain, 'train')]
# Train and predict: num_boost_round is the number of boosting iterations
model = xgb.train(params, dtrain, num_boost_round=50, evals=watchlist)
ypred = model.predict(dtest)
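# If class probabilities are wanted instead of hard labels, a minimal sketch
# using 'multi:softprob' (predict then returns an (n_samples, 3) array whose
# argmax is the hard label); the params_prob/model_prob names are our own:
params_prob = dict(params, objective='multi:softprob')
model_prob = xgb.train(params_prob, dtrain, num_boost_round=50)
proba = model_prob.predict(dtest)
ypred_from_proba = np.argmax(proba, axis=1)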
# test_y kept the sampled row indices from data; reset them so positions line up with the predictions ==========================
test_y = test_y.reset_index(drop=True)
# Compute accuracy by hand
cnt1 = 0
cnt2 = 0
for i in range(len(test_y)):
    if ypred[i] == test_y[i]:
        cnt1 += 1
    else:
        cnt2 += 1
print("Accuracy: %.2f %% " % (100 * cnt1 / (cnt1 + cnt2)))
# Output:
# Accuracy: 69.26 %
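# The already-imported sklearn helper gives the same figure in one call:
print("Accuracy: %.2f %%" % (100 * accuracy_score(test_y, ypred)))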
# Confusion matrix: diagonal entries are correct, off-diagonal are misclassifications
metrics.confusion_matrix(test_y, ypred)
# Output:
# array([[4483, 963, 1153],
# [ 694, 4863, 844],
# [1004, 1336, 4160]])
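# For a per-class precision/recall breakdown, sklearn's report is convenient:
print(metrics.classification_report(test_y, ypred))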
# Interpreting the model =====================================================
xgb.plot_importance(model)
# Figure 2: XGBoost built-in feature importance
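# A sketch of the random-column check promised earlier: compare each feature's
# gain against the '随机数' column and treat anything below it as likely noise.
# get_score is standard Booster API; the comparison logic is our own addition.
gain = model.get_score(importance_type='gain')
noise_gain = gain.get('随机数', 0.0)
print([f for f, g in gain.items() if g < noise_gain])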
# Explaining the model with SHAP =============================================
# Work around a UTF-8 decoding error when SHAP parses the booster:
# newer save_raw() output is prefixed with 4 extra bytes ('binf'), so strip
# them before handing the model to SHAP (recent shap/xgboost releases may no
# longer need this patch)
model_modify = model.save_raw()[4:]
def myfun(self=None):
    return model_modify
model.save_raw = myfun
# Compute SHAP values
explainer = shap.TreeExplainer(model)
shap_values = explainer.shap_values(train_x)
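# For a multi-class booster, this shap version returns a list with one
# (n_samples, n_features) array per class -- a quick structural check:
print(len(shap_values), shap_values[0].shape)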
# Global feature importance summary across all classes
shap.summary_plot(shap_values, train_x)
# Figure 3: SHAP summary across the three classes
# Per-class SHAP value distributions
shap.summary_plot(shap_values[1], train_x, max_display=15)
# Figure 4: SHAP summary for class 1
shap.summary_plot(shap_values[2], train_x, max_display=15)
shap.summary_plot(shap_values[0], train_x, max_display=15)
# SHAP explanation of training sample index 1 for the class-1 output
shap.force_plot(explainer.expected_value[1], shap_values[1][1, :], train_x.iloc[1, :])
# Figure 5: force plot for a single sample (class 1)
# The same explanation as a bar chart
cols = train_x.columns.tolist()
shap.bar_plot(shap_values[1][1, :], feature_names=cols)
# Figure 6: bar plot of the sample's SHAP values
# Tabulate the sample's feature values and SHAP values
sample_explainer = pd.DataFrame()
sample_explainer['feature'] = cols
sample_explainer['feature_value'] = train_x[cols].iloc[1].values
sample_explainer['shap_value'] = shap_values[1][1, :]
sample_explainer
# Figure 7: feature/SHAP table for the sample
# Relationship between the single feature 'dangou_ratio' and the class-1 output
shap.dependence_plot('dangou_ratio', shap_values[1], train_x[cols], display_features=train_x[cols], interaction_index=None)
# Figure 8: dependence plot for 'dangou_ratio'
# Cumulative force plot over the first 1000 samples (axis contents are selectable interactively)
shap.force_plot(explainer.expected_value[1], shap_values[1][:1000, :], train_x.iloc[:1000, :])
# Figure 9: stacked force plot for 1000 samples
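# Interactive force plots only render inside a notebook; shap.save_html can
# persist one as a standalone page (the output filename here is our choice):
plot = shap.force_plot(explainer.expected_value[1], shap_values[1][:1000, :], train_x.iloc[:1000, :])
shap.save_html('force_plot_class1.html', plot)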