author: 小李江湖
date: 2020-9-18
To support later feature engineering and model building, getting an overall understanding of the data and preprocessing it properly is especially important. This stage takes a considerable amount of time spent exploring and organizing the data, and the quality of the data largely determines whether model building succeeds.
```python
# Import the modules and libraries needed for the analysis
import pandas as pd
import numpy as np
import warnings
import matplotlib.pyplot as plt
import seaborn as sns
warnings.filterwarnings('ignore')

# Load the data
data_train = pd.read_csv(r'G:\比赛\阿里天池\金融风控\train.csv')
data_test = pd.read_csv(r"G:\比赛\阿里天池\金融风控\testA.csv")

# Read only part of the file
df = pd.read_csv(r'G:\比赛\阿里天池\金融风控\train.csv', nrows=5)
```
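Besides `nrows`, pandas can also read a large file in chunks so it never has to sit in memory all at once; a minimal sketch (the chunk size of 100,000 rows is an arbitrary choice):

```python
# Read the training set in chunks of 100,000 rows; each chunk is a regular DataFrame
chunk_iter = pd.read_csv(r'G:\比赛\阿里天池\金融风控\train.csv', chunksize=100000)
for chunk in chunk_iter:
    print(chunk.shape)
    break  # only peek at the first chunk here
```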
Faced with a large amount of data, start with `.shape`, `.columns`, `.info()`, `.head()`, and `.describe()` to get an overall picture of it.
```python
# Size of the data
data_train.shape

# Look at the first n rows (default: first five)
data_train.head()

# Features contained in the data
data_train.columns

# Data types of each column
data_train.info()

# Summary statistics
data_train.describe()
```
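Since this is a default-prediction task, it is also worth checking how imbalanced the label is; a small sketch, assuming `isDefault` is the target column:

```python
# Proportion of each class in the target column (assumed to be isDefault)
data_train['isDefault'].value_counts(normalize=True)
```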
```python
# Find features that contain only a single value
one_feature = [col for col in data_train.columns if data_train[col].nunique() <= 1]
one_feature
```
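Columns with a single distinct value carry no information for a model, so they are candidates for removal. A minimal sketch; the result is kept in a separate DataFrame (`data_train_reduced` is a hypothetical name) so the rest of the walkthrough still uses the full `data_train`:

```python
# Optionally drop single-valued columns; the original data_train is left untouched
data_train_reduced = data_train.drop(columns=one_feature)
data_train_reduced.shape
```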
```python
# Numeric columns
num_fea = list(data_train.select_dtypes(exclude=['object']).columns)

# Non-numeric (categorical) columns
cate_fea = list(filter(lambda x: x not in num_fea, list(data_train.columns)))
```

Non-numeric features:
![cate_fea output](https://img-blog.csdnimg.cn/20200918232707817.png#pic_center)
```python
# Processing the numeric features:
# split them into discrete and continuous variables
def get_num_fea(data, feas, n):
    serial_num = []  # continuous variables
    noser_num = []   # discrete variables
    for fea in feas:
        if data[fea].nunique() <= n:
            noser_num.append(fea)
            continue
        serial_num.append(fea)
    return serial_num, noser_num

# serial_num: continuous features
# noser_num : discrete features
serial_num, noser_num = get_num_fea(data_train, num_fea, 10)
```
Discrete features:

```
['term',
 'homeOwnership',
 'verificationStatus',
 'isDefault',
 'initialListStatus',
 'applicationType',
 'policyCode',
 'n11',
 'n12']
```
#### 3.2 Exploring each discrete feature with value_counts()
```python
# Discrete feature analysis: term
data_train["term"].value_counts()

# Explore each discrete feature in turn
for noser in noser_num:
    print(noser, data_train[noser].value_counts(), sep=" ")
```
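Beyond raw counts, it can help to relate each discrete feature to the label. A small sketch using `pd.crosstab`, assuming `isDefault` is the target and taking `term` as the feature of interest:

```python
# Default rate within each term value (rows sum to 1 because of normalize='index')
pd.crosstab(data_train['term'], data_train['isDefault'], normalize='index')
```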
```python
# Visualizing a continuous feature
plt.figure(figsize=(16, 12))
plt.suptitle("Continuous (serial) features", fontsize=20)

plt.subplot(121)  # left panel: raw values
sub1 = sns.distplot(data_train['loanAmnt'])
sub1.set_title("loanAmnt Distribution", fontsize=18)
sub1.set_xlabel('')
sub1.set_ylabel('density')

plt.subplot(122)  # right panel: log-transformed values
sub2 = sns.distplot(np.log(data_train['loanAmnt']))
sub2.set_title("loanAmnt (log) Distribution", fontsize=18)
sub2.set_xlabel('')
sub2.set_ylabel('density')
```
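To look at every continuous feature at once instead of one subplot at a time, one option is to melt the data to long format and let seaborn facet it; a sketch, assuming `serial_num` from the split above:

```python
# One histogram/density plot per continuous feature
f = pd.melt(data_train, value_vars=serial_num).dropna()
g = sns.FacetGrid(f, col='variable', col_wrap=4, sharex=False, sharey=False)
g = g.map(sns.distplot, 'value')
```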
```python
cate_fea

data_train['subGrade'].value_counts()

# Visualizing a non-numeric feature
plt.figure(figsize=(5, 5))
sns.barplot(data_train['employmentLength'].value_counts(dropna=False)[:20],
            data_train['employmentLength'].value_counts(dropna=False).keys()[:20])
plt.show()
# Split the training set by label so the two groups can be compared
# (assumption: isDefault == 1 marks a default/"fraud" sample)
train_fr = data_train[data_train['isDefault'] == 1]
train_nofr = data_train[data_train['isDefault'] == 0]

fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(12, 7))
train_fr.groupby('grade')['grade'].count().plot(kind='barh', ax=ax1,
                                                title='Count of grade fraud')
train_nofr.groupby('grade')['grade'].count().plot(kind='barh', ax=ax2,
                                                  title='Count of grade no-fraud')
train_fr.groupby("employmentLength")['employmentLength'].count().plot(kind='barh', ax=ax3,
                                                                      title='Count of employmentLength fraud')
train_nofr.groupby("employmentLength")['employmentLength'].count().plot(kind='barh', ax=ax4,
                                                                        title='Count of employmentLength no-fraud')
plt.show()
```
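The two rows of plots above compare absolute counts; a complementary view is the default rate within each grade, sketched below:

```python
# Mean of the 0/1 label per grade = default rate for that grade
data_train.groupby('grade')['isDefault'].mean().sort_values()
```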
```python
# Replace missing values with 0
# (note: fillna returns a new DataFrame; reassign or pass inplace=True to keep the result)
data_train.fillna(0)

# Missing-value handling: fill with the column mean
def replace_missing_value(data_type, column_names):
    for column_name in column_names:
        data_type[column_name] = data_type[column_name].fillna(data_type[column_name].mean())
```
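As a usage example, the mean-fill function could be applied to the continuous numeric columns found earlier; whether mean-filling suits every one of those columns is a modelling choice, not something this sketch decides:

```python
# Fill missing values in the continuous numeric columns with their column means
replace_missing_value(data_train, serial_num)
```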
Check for missing values:

```python
# Number of columns that contain at least one missing value
data_train.isnull().any().sum()
```

Compute the missing rate per column:

```python
missing = data_train.isnull().sum() / len(data_train)
```
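To make the missing rates easier to scan, they can be sorted and plotted; a small sketch:

```python
# Keep only columns that actually have missing values, sort, and plot
missing = missing[missing > 0].sort_values(ascending=False)
missing.plot.bar(figsize=(12, 4))
plt.show()
```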
This post used shape, columns, info(), head(), and describe() to build an overall picture of the data, then explored the numeric and non-numeric features separately, with the numeric features further split into discrete and continuous variables.