Task03 done! I hope I can stick with the course to the end, keep it up! I'm still not fluent with visualization or with handling time variables, so I need more practice on those.
First, import the libraries we'll use:
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import datetime
from tqdm import tqdm  # haven't used this library much before
from sklearn.preprocessing import LabelEncoder
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
from sklearn.preprocessing import MinMaxScaler
import xgboost as xgb
import lightgbm as lgb
from catboost import CatBoostRegressor
import warnings
from sklearn.model_selection import StratifiedKFold, KFold
from sklearn.metrics import accuracy_score, f1_score, roc_auc_score, log_loss
warnings.filterwarnings('ignore')
In the EDA part we already developed an overall picture of the data and the distributions of some features. In preprocessing we generally fix the issues identified during EDA; this section covers filling missing values, converting time-format features, and handling some object-typed categorical features.
First, separate the object features from the numerical features:
numerical_fea = list(df_train.select_dtypes(exclude=['object']).columns)
label = 'isDefault'  # remove the target variable
numerical_fea.remove(label)
numerical_fea  # numerical features
category_fea = list(filter(lambda x: x not in numerical_fea, list(df_train.columns)))
category_fea  # object (categorical) features
# Count missing values per column
df_train.isnull().sum()
df_train.isnull().sum() / df_train.shape[0]  # proportion of missing values per column
# Fill numerical features with the median (the original comment said "mean", but the code uses the median);
# the test set is filled with the training-set statistic to avoid leaking test information
df_train[numerical_fea] = df_train[numerical_fea].fillna(df_train[numerical_fea].median())
df_test[numerical_fea] = df_test[numerical_fea].fillna(df_train[numerical_fea].median())
# Fill categorical features with the mode; .mode() returns a frame because of possible ties,
# so take the first row, and again fill the test set with the training-set mode
df_train[category_fea] = df_train[category_fea].fillna(df_train[category_fea].mode().iloc[0])
df_test[category_fea] = df_test[category_fea].fillna(df_train[category_fea].mode().iloc[0])
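A quick aside on why `.mode().iloc[0]` is needed above (a toy illustration of my own, not from the original notes): `mode()` keeps its own integer index, so passing it straight to `fillna` aligns on the row index and silently fails to fill.

toy = pd.Series(['A', 'A', None, 'B'])          # made-up toy data
print(toy.fillna(toy.mode()))          # row 2 is still NaN: mode() only has index 0, not 2
print(toy.fillna(toy.mode().iloc[0]))  # row 2 becomes 'A' as intended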
df_train['issueDate'].min()  # earliest loan issue date; used as the reference point below
# Convert to datetime and construct a time feature
for data in [df_train, df_test]:
    data['issueDate'] = pd.to_datetime(data['issueDate'], format='%Y-%m-%d')
    startdate = datetime.datetime.strptime('2007-06-01', '%Y-%m-%d')  # the earliest issue date found above
    # Number of days since the start date
    data['issueDateDT'] = (data['issueDate'] - startdate).dt.days
df_train['employmentLength'].value_counts(dropna=False).sort_index()  # inspect the raw values, including NaN
def employmentLength_to_int(s):
    # Keep NaN as-is; otherwise take the leading number of years
    if pd.isnull(s):
        return s
    else:
        return np.int8(s.split()[0])

for data in [df_train, df_test]:
    data['employmentLength'].replace(to_replace='10+ years', value='10 years', inplace=True)
    data['employmentLength'].replace('< 1 year', '0 years', inplace=True)
    data['employmentLength'] = data['employmentLength'].apply(employmentLength_to_int)
df_train['earliesCreditLine'].sample(5)  # before processing
for data in [df_train, df_test]:
    data['earliesCreditLine'] = data['earliesCreditLine'].apply(lambda s: int(s[-4:]))  # keep only the year
Check the number of categories for each categorical feature:
cate_features = ['grade', 'subGrade', 'employmentTitle', 'homeOwnership',
                 'verificationStatus', 'purpose', 'postCode', 'regionCode',
                 'applicationType', 'initialListStatus', 'title', 'policyCode']
for f in cate_features:
    print(f, 'number of unique values:', df_train[f].nunique())
Note: I couldn't see any substantive difference from this step; worth revisiting later.
# More than 2 categories, not high-dimensional or sparse, and purely categorical: one-hot encode them.
# Note: reassigning `data` inside a `for data in [...]` loop would not modify the original
# frames, so encode each frame explicitly.
dummy_cols = ['subGrade', 'homeOwnership', 'verificationStatus', 'purpose', 'regionCode']
df_train = pd.get_dummies(df_train, columns=dummy_cols, drop_first=True)
df_test = pd.get_dummies(df_test, columns=dummy_cols, drop_first=True)
# Helper: flag outliers with the 3-sigma rule
def find_outliers_by_3sigma(data, fea):
    data_std = np.std(data[fea])
    data_mean = np.mean(data[fea])
    outliers_cut_off = data_std * 3
    lower_rule = data_mean - outliers_cut_off
    upper_rule = data_mean + outliers_cut_off
    data[fea + '_outliers'] = data[fea].apply(
        lambda x: 'outlier' if x > upper_rule or x < lower_rule else 'normal')
    return data
numerical_fea.remove('issueDate')
numerical_fea  # drop the date variable from the numerical feature list
df_train = df_train.copy()
for fea in numerical_fea:
    df_train = find_outliers_by_3sigma(df_train, fea)
    print(df_train[fea + '_outliers'].value_counts())
    print(df_train.groupby(fea + '_outliers')['isDefault'].sum())
    print('\n')
Delete the outliers:
for fea in numerical_fea:
    df_train = df_train[df_train[fea + '_outliers'] == 'normal']
    df_train = df_train.reset_index(drop=True)
To sum up in one sentence: the quartiles split the data at three points into four intervals; IQR = Q3 - Q1, lower whisker = Q1 - 1.5 × IQR, upper whisker = Q3 + 1.5 × IQR.
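To make that concrete, here is a minimal sketch (my own addition) of the box-plot/IQR version of the outlier flagging, written in the same style as find_outliers_by_3sigma above; the helper name is hypothetical:

def find_outliers_by_iqr(data, fea):
    # Box-plot rule: flag values outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR]
    q1 = data[fea].quantile(0.25)
    q3 = data[fea].quantile(0.75)
    iqr = q3 - q1
    lower, upper = q1 - 1.5 * iqr, q3 + 1.5 * iqr
    data[fea + '_outliers_iqr'] = data[fea].apply(
        lambda x: 'outlier' if x < lower or x > upper else 'normal')
    return data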
# Map into evenly spaced bins by integer division; each bin is 1000 wide (loanAmnt // 1000)
df_train['loanAmnt_bin1'] = np.floor_divide(df_train['loanAmnt'], 1000)
# Map into exponentially wider bins via a log transform
df_train['loanAmnt_bin2'] = np.floor(np.log10(df_train['loanAmnt']))
# Quantile binning: 10 bins with roughly equal counts
df_train['loanAmnt_bin3'] = pd.qcut(df_train['loanAmnt'], 10, labels=False)
Chi-square binning and other binning methods to be tried next time!!
# Walk through target mean encoding step by step
for col in ['grade', 'subGrade']:
    temp_dict = df_train.groupby([col])['isDefault'].agg(['mean']).reset_index().rename(
        columns={'mean': col + '_target_mean'})  # becomes a DataFrame: one row per category
    temp_dict.index = temp_dict[col].values  # index by the category value
    print(temp_dict)
    temp_dict = temp_dict[col + '_target_mean'].to_dict()  # turn into a dict for direct mapping later
    print(temp_dict)
# Apply the encoding to both train and test
for col in ['grade', 'subGrade']:
    temp_dict = df_train.groupby([col])['isDefault'].agg(['mean']).reset_index().rename(
        columns={'mean': col + '_target_mean'})
    temp_dict.index = temp_dict[col].values
    temp_dict = temp_dict[col + '_target_mean'].to_dict()
    df_train[col + '_target_mean'] = df_train[col].map(temp_dict)  # map directly
    df_test[col + '_target_mean'] = df_test[col].map(temp_dict)    # map with the training-set means
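One caveat with the mapping above: each training row is encoded with a statistic that includes its own label, which can leak the target. A common remedy is out-of-fold target encoding; a minimal sketch of my own (the helper name `oof_target_encode` is hypothetical, not from the original task):

from sklearn.model_selection import KFold

def oof_target_encode(df, col, target, n_splits=5, seed=2020):
    # Encode each fold with target means computed on the other folds only
    df = df.copy()
    df[col + '_te'] = np.nan
    kf = KFold(n_splits=n_splits, shuffle=True, random_state=seed)
    for trn_idx, val_idx in kf.split(df):
        fold_means = df.iloc[trn_idx].groupby(col)[target].mean()
        df.iloc[val_idx, df.columns.get_loc(col + '_te')] = df.iloc[val_idx][col].map(fold_means).values
    return df

# Usage sketch: df_train = oof_target_encode(df_train, 'grade', 'isDefault')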
for df in [df_train, df_test]:
    for item in ['n0','n1','n2','n2.1','n4','n5','n6','n7','n8','n9','n10','n11','n12','n13','n14']:
        # Without transform, groupby would return just one grade mean per n-category;
        # transform broadcasts each group's mean back onto every row, so every row
        # gets the grade mean of its own group
        df['grade_to_mean_' + item] = df['grade'] / df.groupby([item])['grade'].transform('mean')
        df['grade_to_std_' + item] = df['grade'] / df.groupby([item])['grade'].transform('std')
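To make the transform comment above concrete, a tiny toy example (made-up values, my own addition):

toy = pd.DataFrame({'n0': [0, 0, 1], 'grade': [2, 4, 6]})
print(toy.groupby('n0')['grade'].agg('mean'))        # one value per group: 0 -> 3.0, 1 -> 6.0
print(toy.groupby('n0')['grade'].transform('mean'))  # broadcast per row: [3.0, 3.0, 6.0]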
[Haven't had time to try this yet; I definitely need to catch up on it later!!!]
The difference between one-hot encoding and label encoding
The link here covers both label encoding and one-hot encoding, including the official documentation, which is actually easier to understand!
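A toy contrast of the two (my own illustration, using the LabelEncoder imported above):

toy = pd.Series(['B', 'A', 'C', 'A'])       # made-up toy data
print(LabelEncoder().fit_transform(toy))    # label encoding: [1 0 2 0], one integer per category (implies an order)
print(pd.get_dummies(toy, prefix='toy'))    # one-hot: one 0/1 column per category, no implied order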
for col in tqdm(['employmentTitle', 'postCode', 'title', 'subGrade']):
    le = LabelEncoder()
    # Fit on the union of train and test categories so both sets can be transformed
    le.fit(list(df_train[col].astype(str).values) + list(df_test[col].astype(str).values))
    df_train[col] = le.transform(list(df_train[col].astype(str).values))  # write the codes back to the column
    df_test[col] = le.transform(list(df_test[col].astype(str).values))
print('Label Encoding done')
# list(le.inverse_transform([2, 2, 1]))  # inverse_transform recovers the original categories from the codes
# Pseudocode for min-max normalization
for fea in features_to_scale:  # the (hypothetical) list of features to normalize
    data[fea] = (data[fea] - np.min(data[fea])) / (np.max(data[fea]) - np.min(data[fea]))
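Equivalently, the MinMaxScaler imported at the top does the same thing; a sketch assuming the same hypothetical features_to_scale list, fitting on the training set only so the test set reuses the training min/max:

scaler = MinMaxScaler()
df_train[features_to_scale] = scaler.fit_transform(df_train[features_to_scale])
df_test[features_to_scale] = scaler.transform(df_test[features_to_scale])  # no re-fitting on test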
Here is a link to a detailed introduction to feature selection on Zhihu.
Feature selection prunes away useless features to reduce the complexity of the final model. Its goal is a parsimonious model that is faster to compute while keeping prediction accuracy unchanged or only slightly reduced. Feature selection is not about reducing training time (in fact, some techniques increase overall training time); it is about reducing the time needed to score the model.
from sklearn.feature_selection import VarianceThreshold
# threshold is the variance cutoff: features with lower variance are dropped;
# the return value is the data after feature selection
VarianceThreshold(threshold=3).fit_transform(X_train)
from sklearn.feature_selection import SelectKBest
from scipy.stats import pearsonr

# Select the K best features and return the reduced data.
# The first argument of SelectKBest is a scoring function: it takes the feature
# matrix and the target vector and returns (scores, p-values), one pair per feature.
# Here we score each feature by its Pearson correlation with the target;
# k is the number of features to keep.
def pearson_score(X, y):
    scores, pvalues = zip(*[pearsonr(X[:, i], y) for i in range(X.shape[1])])
    return np.abs(scores), np.array(pvalues)  # use |r| so strong negative correlations also rank highly

SelectKBest(score_func=pearson_score, k=5).fit_transform(train, target_train)
The chi-square statistic measures the independence of two events, i.e. how far the observed counts deviate from the expected counts.
The null hypothesis of a chi-square test is that the observed frequencies match the expected frequencies. There are generally two kinds of test:
Goodness-of-fit test: a test on a single categorical variable, e.g. testing whether a coin is fair. Degrees of freedom = number of categories - 1.
Test of independence (contingency-table analysis): a test on two categorical variables, e.g. whether gender affects whether someone eats breakfast. Degrees of freedom = (number of categories of the independent variable - 1) × (number of categories of the dependent variable - 1).
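A quick worked example of the goodness-of-fit case (my own addition, with made-up counts): suppose 100 coin flips yield 60 heads and 40 tails.

from scipy.stats import chisquare
# Expected counts under a fair coin: 50 heads, 50 tails; df = 2 categories - 1 = 1
stat, p = chisquare(f_obs=[60, 40], f_exp=[50, 50])
print(stat, p)  # chi2 = (60-50)**2/50 + (40-50)**2/50 = 4.0, p ≈ 0.046, so reject fairness at the 5% level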
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import chi2
# k is the number of features to keep; note that chi2 requires non-negative feature values
SelectKBest(chi2, k=5).fit_transform(train, target_train)
f_classif is for classification problems with continuous features and a discrete target; it is an ANOVA-based test that compares the feature means across classes.
f_regression is for regression problems where both the feature and the target are continuous; it is based on the sample correlation coefficient. In theory a t-test could be used instead, but sklearn converts it into an F statistic.
from sklearn.feature_selection import SelectKBest
from sklearn.feature_selection import f_classif, f_regression
SelectKBest(f_classif, k=2).fit_transform(X_train, Y_train)     # classification problem
SelectKBest(f_regression, k=2).fit_transform(X_train, Y_train)  # regression problem
from sklearn.feature_selection import SelectKBest
from minepy import MINE

# MINE's API is not functional-style, so wrap it in a function that returns a
# (score, p-value) pair; the second element is a fixed placeholder p-value of 0.5
def mic(x, y):
    m = MINE()
    m.compute_score(x, y)
    return (m.mic(), 0.5)

# Score every feature column with MIC; k is the number of features to keep
def mic_score(X, Y):
    scores, pvalues = zip(*[mic(X[:, i], Y) for i in range(X.shape[1])])
    return np.array(scores), np.array(pvalues)

SelectKBest(score_func=mic_score, k=2).fit_transform(train, target_train)
from sklearn.feature_selection import RFE
from sklearn.linear_model import LogisticRegression
# Recursive feature elimination; returns the data after feature selection.
# estimator is the base model; n_features_to_select is the number of features to keep
RFE(estimator=LogisticRegression(),
    n_features_to_select=2).fit_transform(train, target_train)
from sklearn.feature_selection import SelectFromModel
from sklearn.linear_model import LogisticRegression
# Feature selection with L1-penalized logistic regression as the base model
# (the liblinear solver is needed: sklearn's default solver does not support the L1 penalty)
SelectFromModel(LogisticRegression(penalty="l1", C=0.1,
                                   solver='liblinear')).fit_transform(train, target_train)
from sklearn.feature_selection import SelectFromModel
from sklearn.ensemble import GradientBoostingClassifier
# Feature selection with GBDT as the base model
SelectFromModel(GradientBoostingClassifier()).fit_transform(train, target_train)
Summary: I was pressed for time and didn't leave enough room for Task 3, so I basically just ran the code. Afterwards I need to spend more time understanding it, thinking about how to preprocess based on what the data looks like and how to derive new variables from the actual situation. Keep going!
Additional notes:
# Drop columns we don't need; subGrade (the sub-level of the loan grade) has too many
# categories and hasn't been analysed in detail, so drop it directly for now
for data in [df_train, df_test]:
    data.drop(['issueDate', 'id', 'subGrade'], axis=1, inplace=True)
"纵向用缺失值上面的值替换缺失值"
df_train = df_train.fillna(axis=0,method='ffill')
x_train = df_train.drop(['isDefault', 'id'], axis=1)
# Compute the correlation of each feature with the target (the original comment said
# "covariance", but corrwith computes correlation coefficients)
data_corr = x_train.corrwith(df_train.isDefault)
result = pd.DataFrame(columns=['features', 'corr'])
result['features'] = data_corr.index
result['corr'] = data_corr.values
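A small follow-up sketch on the `result` frame built above: sort by absolute correlation to see the strongest linear relations with the target first.

result['abs_corr'] = result['corr'].abs()
print(result.sort_values('abs_corr', ascending=False).head(20))  # strongest candidates first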
# Or just look at a heatmap
data_numeric = df_train[numerical_fea]
correlation = data_numeric.corr()
f, ax = plt.subplots(figsize=(7, 7))
plt.title('Correlation of Numeric Features', y=1, size=16)
sns.heatmap(correlation, square=True, vmax=0.8)
features = [f for f in df_train.columns if f not in ['id', 'issueDate', 'isDefault'] and '_outliers' not in f]
x_train = df_train[features]
x_test = df_test[features]
y_train = df_train['isDefault']
def cv_model(clf, train_x, train_y, test_x, clf_name):
    folds = 5
    seed = 2020
    kf = KFold(n_splits=folds, shuffle=True, random_state=seed)

    train = np.zeros(train_x.shape[0])  # out-of-fold predictions
    test = np.zeros(test_x.shape[0])    # test predictions averaged over folds
    cv_scores = []

    for i, (train_index, valid_index) in enumerate(kf.split(train_x, train_y)):
        print('************************************ {} ************************************'.format(str(i + 1)))
        trn_x, trn_y = train_x.iloc[train_index], train_y[train_index]
        val_x, val_y = train_x.iloc[valid_index], train_y[valid_index]

        if clf_name == "lgb":
            train_matrix = clf.Dataset(trn_x, label=trn_y)
            valid_matrix = clf.Dataset(val_x, label=val_y)

            params = {
                'boosting_type': 'gbdt',
                'objective': 'binary',
                'metric': 'auc',
                'min_child_weight': 5,
                'num_leaves': 2 ** 5,
                'lambda_l2': 10,
                'feature_fraction': 0.8,
                'bagging_fraction': 0.8,
                'bagging_freq': 4,
                'learning_rate': 0.1,
                'seed': 2020,
                'nthread': 28,
                'n_jobs': 24,
                'silent': True,
                'verbose': -1,
            }

            model = clf.train(params, train_matrix, 50000, valid_sets=[train_matrix, valid_matrix],
                              verbose_eval=200, early_stopping_rounds=200)
            val_pred = model.predict(val_x, num_iteration=model.best_iteration)
            test_pred = model.predict(test_x, num_iteration=model.best_iteration)
            # print(list(sorted(zip(features, model.feature_importance("gain")), key=lambda x: x[1], reverse=True))[:20])

        if clf_name == "xgb":
            train_matrix = clf.DMatrix(trn_x, label=trn_y)
            valid_matrix = clf.DMatrix(val_x, label=val_y)

            params = {'booster': 'gbtree',
                      'objective': 'binary:logistic',
                      'eval_metric': 'auc',
                      'gamma': 1,
                      'min_child_weight': 1.5,
                      'max_depth': 5,
                      'lambda': 10,
                      'subsample': 0.7,
                      'colsample_bytree': 0.7,
                      'colsample_bylevel': 0.7,
                      'eta': 0.04,
                      'tree_method': 'exact',
                      'seed': 2020,
                      'nthread': 36,
                      'silent': True,
                      }

            watchlist = [(train_matrix, 'train'), (valid_matrix, 'eval')]
            model = clf.train(params, train_matrix, num_boost_round=50000, evals=watchlist,
                              verbose_eval=200, early_stopping_rounds=200)
            val_pred = model.predict(valid_matrix, ntree_limit=model.best_ntree_limit)
            # predict() needs a DMatrix, so wrap the raw test frame (the original passed test_x directly)
            test_pred = model.predict(clf.DMatrix(test_x), ntree_limit=model.best_ntree_limit)

        if clf_name == "cat":
            params = {'learning_rate': 0.05, 'depth': 5, 'l2_leaf_reg': 10, 'bootstrap_type': 'Bernoulli',
                      'od_type': 'Iter', 'od_wait': 50, 'random_seed': 11, 'allow_writing_files': False}

            model = clf(iterations=20000, **params)
            model.fit(trn_x, trn_y, eval_set=(val_x, val_y),
                      cat_features=[], use_best_model=True, verbose=500)

            val_pred = model.predict(val_x)
            test_pred = model.predict(test_x)

        train[valid_index] = val_pred
        test += test_pred / kf.n_splits  # the original used `test = ...`, which kept only the last fold
        cv_scores.append(roc_auc_score(val_y, val_pred))

        print(cv_scores)

    print("%s_score_list:" % clf_name, cv_scores)
    print("%s_score_mean:" % clf_name, np.mean(cv_scores))
    print("%s_score_std:" % clf_name, np.std(cv_scores))
    return train, test
def lgb_model(x_train, y_train, x_test):
    lgb_train, lgb_test = cv_model(lgb, x_train, y_train, x_test, "lgb")
    return lgb_train, lgb_test

def xgb_model(x_train, y_train, x_test):
    xgb_train, xgb_test = cv_model(xgb, x_train, y_train, x_test, "xgb")
    return xgb_train, xgb_test

def cat_model(x_train, y_train, x_test):
    cat_train, cat_test = cv_model(CatBoostRegressor, x_train, y_train, x_test, "cat")
    return cat_train, cat_test  # the original was missing this return
lgb_train, lgb_test = lgb_model(x_train, y_train, x_test)