import pandas as pd
import numpy as np
import seaborn as sns
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
from sklearn.datasets import load_boston
from matplotlib import pyplot as plt
from sklearn.decomposition import PCA
from mpl_toolkits.mplot3d import Axes3D
plt.rcParams['font.sans-serif'] = ['SimHei']  # use SimHei so Chinese labels render correctly in matplotlib
plt.rcParams['axes.unicode_minus'] = False  # render minus signs correctly in matplotlib plots
boston = load_boston()  # load the Boston housing dataset once
x_data = boston.data  # all feature variables
y_data = boston.target  # target variable: house price (MEDV)
feature_name = boston.feature_names  # feature names
df = pd.DataFrame(x_data,columns=feature_name)
df['MEDV'] = y_data
df.head()
|   | CRIM | ZN | INDUS | CHAS | NOX | RM | AGE | DIS | RAD | TAX | PTRATIO | B | LSTAT | MEDV |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| 0 | 0.00632 | 18.0 | 2.31 | 0.0 | 0.538 | 6.575 | 65.2 | 4.0900 | 1.0 | 296.0 | 15.3 | 396.90 | 4.98 | 24.0 |
| 1 | 0.02731 | 0.0 | 7.07 | 0.0 | 0.469 | 6.421 | 78.9 | 4.9671 | 2.0 | 242.0 | 17.8 | 396.90 | 9.14 | 21.6 |
| 2 | 0.02729 | 0.0 | 7.07 | 0.0 | 0.469 | 7.185 | 61.1 | 4.9671 | 2.0 | 242.0 | 17.8 | 392.83 | 4.03 | 34.7 |
| 3 | 0.03237 | 0.0 | 2.18 | 0.0 | 0.458 | 6.998 | 45.8 | 6.0622 | 3.0 | 222.0 | 18.7 | 394.63 | 2.94 | 33.4 |
| 4 | 0.06905 | 0.0 | 2.18 | 0.0 | 0.458 | 7.147 | 54.2 | 6.0622 | 3.0 | 222.0 | 18.7 | 396.90 | 5.33 | 36.2 |
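Note: `load_boston` was deprecated in scikit-learn 1.0 and removed in 1.2 because of ethical concerns around the `B` feature. On newer versions, the workaround suggested in scikit-learn's own deprecation notice is to fetch the original data from the CMU StatLib archive:

# Workaround for scikit-learn >= 1.2, where load_boston no longer exists;
# this mirrors the snippet from scikit-learn's deprecation notice
data_url = "http://lib.stat.cmu.edu/datasets/boston"
raw_df = pd.read_csv(data_url, sep=r"\s+", skiprows=22, header=None)
x_data = np.hstack([raw_df.values[::2, :], raw_df.values[1::2, :2]])  # the 13 feature columns
y_data = raw_df.values[1::2, 2]  # the MEDV target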
Check whether the features are correlated with one another, to judge which kind of model is suitable.
plt.figure(figsize=(12,8))
sns.heatmap(df.corr(), annot=True, fmt='.2f', cmap='PuBu')
[Figure: correlation heatmap of all features, annotated with pairwise coefficients]
The data contains no features with only weak correlations, and collinearity is not a serious concern here, so we can use a linear regression model for prediction.
df.corr()['MEDV'].sort_values()
LSTAT -0.737663
PTRATIO -0.507787
INDUS -0.483725
TAX -0.468536
NOX -0.427321
CRIM -0.388305
RAD -0.381626
AGE -0.376955
CHAS 0.175260
DIS 0.249929
B 0.333461
ZN 0.360445
RM 0.695360
MEDV 1.000000
Name: MEDV, dtype: float64
Next, explore the relationships between the target and the features, and among the features themselves.
sns.pairplot(df[["LSTAT","RM","PTRATIO","MEDV"]])
[Figure: pairplot of LSTAT, RM, PTRATIO and MEDV]
Since the data has no null values and every feature is continuous, not much preprocessing is needed for now. However, to build a model we first split the housing data into training and test sets: roughly 20% of the data is held out as the test set, and the remaining 80% serves as the training set.
X, y = x_data, y_data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=888)
First, fit a linear regression model on the training data and predict on the test set.
linear_model = LinearRegression()
linear_model.fit(X_train, y_train)
coef = linear_model.coef_  # regression coefficients
line_pre = linear_model.predict(X_test)
print('SCORE:{:.4f}'.format(linear_model.score(X_test, y_test)))
print('RMSE:{:.4f}'.format(np.sqrt(mean_squared_error(y_test, line_pre))))
coef
SCORE:0.7559
RMSE:4.3708
array([-1.19007229e-01, 3.64055815e-02, 1.68552680e-02, 2.29397031e+00,
-1.60706448e+01, 3.72371469e+00, 9.22765437e-03, -1.30674803e+00,
3.43072685e-01, -1.45830386e-02, -9.73486692e-01, 7.89797436e-03,
-5.72555056e-01])
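Here `score` is the coefficient of determination R², i.e. 1 − SS_res/SS_tot. As a sanity check (using only the names already defined above), both metrics can be computed by hand:

ss_res = np.sum((y_test - line_pre) ** 2)  # residual sum of squares
ss_tot = np.sum((y_test - y_test.mean()) ** 2)  # total sum of squares
print('R^2 :{:.4f}'.format(1 - ss_res / ss_tot))  # matches linear_model.score(X_test, y_test)
print('RMSE:{:.4f}'.format(np.sqrt(np.mean((y_test - line_pre) ** 2))))  # matches the RMSE above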
The fitted linear regression coefficients are as follows:
df_coef = pd.DataFrame()
df_coef['Title'] = df.columns.delete(-1)
df_coef['Coef'] = coef
df_coef
|   | Title | Coef |
|---|---|---|
| 0 | CRIM | -0.119007 |
| 1 | ZN | 0.036406 |
| 2 | INDUS | 0.016855 |
| 3 | CHAS | 2.293970 |
| 4 | NOX | -16.070645 |
| 5 | RM | 3.723715 |
| 6 | AGE | 0.009228 |
| 7 | DIS | -1.306748 |
| 8 | RAD | 0.343073 |
| 9 | TAX | -0.014583 |
| 10 | PTRATIO | -0.973487 |
| 11 | B | 0.007898 |
| 12 | LSTAT | -0.572555 |
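These coefficients are on the raw feature scales, so their magnitudes are not directly comparable (NOX looks large mainly because its values only span about 0.4 to 0.87). As a side check that was not part of the original analysis, one could refit on standardized features:

# Hypothetical side check: refit on standardized features so that
# coefficient magnitudes become comparable across features
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler().fit(X_train)
lm_std = LinearRegression().fit(scaler.transform(X_train), y_train)
print(pd.Series(lm_std.coef_, index=feature_name).sort_values())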
hos_pre = pd.DataFrame()
hos_pre['Predict'] = line_pre
hos_pre['Truth'] = y_test
hos_pre.plot()
[Figure: predicted vs. true MEDV on the test set]
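A predicted-versus-true scatter with the y = x diagonal is another common way to read this result; a minimal sketch:

# Optional companion plot: points on the dashed diagonal would be perfect predictions
plt.figure(figsize=(6, 6))
plt.scatter(y_test, line_pre, alpha=0.6)
lims = [min(y_test.min(), line_pre.min()), max(y_test.max(), line_pre.max())]
plt.plot(lims, lims, 'r--')
plt.xlabel('True MEDV')
plt.ylabel('Predicted MEDV')
plt.show()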
Next, rebuild the model using only the three features most strongly correlated with MEDV, and compare it with the original model.
df.corr()['MEDV'].abs().sort_values(ascending=False).head(4)
MEDV 1.000000
LSTAT 0.737663
RM 0.695360
PTRATIO 0.507787
Name: MEDV, dtype: float64
X2 = np.array(df[['LSTAT','RM','PTRATIO']])
X2_train, X2_test, y_train, y_test = train_test_split(X2, y, random_state=1,test_size=0.2)
linear_model2 = LinearRegression()
linear_model2.fit(X2_train,y_train)
print(linear_model2.intercept_)
print(linear_model2.coef_)
line2_pre = linear_model2.predict(X2_test)  # predictions
print('SCORE:{:.4f}'.format(linear_model2.score(X2_test, y_test)))  # model score (R^2)
print('RMSE:{:.4f}'.format(np.sqrt(mean_squared_error(y_test, line2_pre))))  # root mean squared error
24.112838451644947
[-0.59061956 3.81700007 -0.97465491]
SCORE:0.6959
RMSE:5.4820
With far fewer features than the first model, this model scores 0.6959, only slightly below the first model's 0.7559.
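Strictly speaking, the two scores come from different splits (random_state=888 vs random_state=1). For a like-for-like comparison, one could re-evaluate the reduced model on the same split as the full model; the exact numbers will shift slightly, but the conclusion should be similar:

# Re-evaluate the 3-feature model on the same split as the full model
X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y, test_size=0.2, random_state=888)
lm3 = LinearRegression().fit(X2_train, y2_train)
print('SCORE:{:.4f}'.format(lm3.score(X2_test, y2_test)))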
X, y = x_data, y_data
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=888)
from sklearn import ensemble
clf = ensemble.GradientBoostingRegressor()
clf.fit(X_train, y_train)
clf_pre = clf.predict(X_test)  # predictions
print('SCORE:{:.4f}'.format(clf.score(X_test, y_test)))  # model score (R^2)
print('RMSE:{:.4f}'.format(np.sqrt(mean_squared_error(y_test, clf_pre))))  # root mean squared error
SCORE:0.9188
RMSE:2.5209
As the results show, the Gradient Boosted Decision Tree (GBDT) algorithm achieves a very good result.
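A single train/test split can flatter a model, so as a hypothetical follow-up (not part of the original run), 5-fold cross-validation would show whether GBDT's advantage holds across splits:

# Hypothetical follow-up: cross-validated R^2 for GBDT vs. linear regression
from sklearn.model_selection import cross_val_score
for name, model in [('Linear', LinearRegression()),
                    ('GBDT', ensemble.GradientBoostingRegressor(random_state=0))]:
    scores = cross_val_score(model, x_data, y_data, cv=5, scoring='r2')
    print('{}: mean R^2 = {:.4f} (+/- {:.4f})'.format(name, scores.mean(), scores.std()))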