sklearn API

class sklearn.tree.DecisionTreeClassifier(criterion='gini', splitter='best', max_depth=None, min_samples_split=2, min_samples_leaf=1, min_weight_fraction_leaf=0.0, max_features=None, random_state=None, max_leaf_nodes=None, min_impurity_decrease=0.0, min_impurity_split=None, class_weight=None, presort=False)

criterion is the splitting criterion; we generally use the Gini index to choose the splitting attribute, and the purity of a dataset is likewise measured by it. Intuitively, the Gini index is the probability that two samples drawn at random from dataset D carry different class labels, so the smaller the Gini index, the higher the purity. (ID3 uses information gain, C4.5 uses the information gain ratio, and CART uses the Gini index; the default here is the Gini index, which tends to work better.)
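As a quick worked example: a node holding 2 'yes' and 3 'no' samples has Gini(D) = 1 - (2/5)^2 - (3/5)^2 = 1 - 0.16 - 0.36 = 0.48, while a perfectly pure node has Gini(D) = 0.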
max_depth is the depth of the tree; it controls the trade-off between accuracy and running speed.
Most of the other parameters can simply be left at their defaults.
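A minimal usage sketch for the class above, fit on sklearn's bundled iris data; the max_depth=3 value is an arbitrary choice for illustration:

from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier

X, y = load_iris(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# criterion='gini' is the default; max_depth caps the tree depth
clf = DecisionTreeClassifier(criterion='gini', max_depth=3, random_state=0)
clf.fit(X_train, y_train)
print(clf.score(X_test, y_test))  # mean accuracy on the held-out split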
Working principle and steps
We use a createBranch() method, as shown below:
Check whether all records in the dataset have the same class label:
    If so: return the class label
    Else:
        find the best feature for splitting the dataset (the one whose split minimizes entropy, i.e. maximizes information gain)
        split the dataset
        create a branch node
        for each subset of the split:
            call createBranch() recursively and add the result to the branch node
        return the branch node
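To make the selection rule concrete: for the toy dataset defined below (2 'yes' and 3 'no' records), the Shannon entropy is H(D) = -(2/5)·log2(2/5) - (3/5)·log2(3/5) ≈ 0.971 bits, and the best feature is the one whose split reduces this value the most.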
Decision tree development workflow

Collect data: any method will do.
Prepare data: the tree-construction algorithm only works on nominal data, so numeric data must be discretized first (a sketch follows this list).
Analyze data: any method will do; once the tree is built, we should check that the diagram matches expectations.
Train the algorithm: build the tree data structure.
Test the algorithm: use the trained ("experience") tree to compute the error rate. (I haven't found good references on the term "experience tree"; interested readers are welcome to fill this in.)
Use the algorithm: this step applies to any supervised learning algorithm, and decision trees in particular make the inner structure of the data easier to understand.
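A minimal sketch of the discretization step, assuming NumPy is available; the feature values and bin edges are made up for illustration:

import numpy as np

heights = np.array([1.2, 3.7, 2.5, 0.4, 4.9])  # hypothetical numeric feature
bins = [1.0, 2.0, 3.0, 4.0]                    # hypothetical cut points
codes = np.digitize(heights, bins)             # one nominal bin code per value
print(codes)                                   # [1 3 2 0 4]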
Python source implementation and a few small examples
from math import log
import operator
import copy
def createDataSet():
    dataSet = [[1, 1, 'yes'],
               [1, 1, 'yes'],
               [1, 0, 'no'],
               [0, 1, 'no'],
               [0, 1, 'no']]
    labels = ['no surfacing', 'flippers']
    # the feature values are already discrete
    return dataSet, labels
def calcShannonEnt(dataSet):
    numEntries = len(dataSet)
    labelCounts = {}
    for featVec in dataSet:  # count the occurrences of each class label
        currentLabel = featVec[-1]
        if currentLabel not in labelCounts:
            labelCounts[currentLabel] = 0
        labelCounts[currentLabel] += 1
    shannonEnt = 0.0
    for key in labelCounts:
        prob = float(labelCounts[key]) / numEntries
        shannonEnt -= prob * log(prob, 2)  # log base 2
    return shannonEnt
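Calling it on the toy dataset reproduces the ≈0.971 bits computed by hand earlier:

dataSet, labels = createDataSet()
print(calcShannonEnt(dataSet))  # 0.9709505944546686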
def splitDataSet(dataSet, axis, value):
    retDataSet = []
    for featVec in dataSet:
        if featVec[axis] == value:
            reducedFeatVec = featVec[:axis]  # chop out the axis used for splitting
            reducedFeatVec.extend(featVec[axis+1:])
            retDataSet.append(reducedFeatVec)
    return retDataSet
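For example, splitting the toy dataset on feature 0 with value 1 keeps the three records whose first column is 1 and drops that column:

print(splitDataSet(dataSet, 0, 1))  # [[1, 'yes'], [1, 'yes'], [0, 'no']]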
def chooseBestFeatureToSplit(dataSet):
    numFeatures = len(dataSet[0]) - 1  # the last column is used for the labels
    baseEntropy = calcShannonEnt(dataSet)
    bestInfoGain = 0.0
    bestFeature = -1
    for i in range(numFeatures):  # iterate over all the features
        featList = [example[i] for example in dataSet]  # all values of this feature
        uniqueVals = set(featList)  # the unique values
        newEntropy = 0.0
        for value in uniqueVals:
            subDataSet = splitDataSet(dataSet, i, value)
            prob = len(subDataSet) / float(len(dataSet))
            newEntropy += prob * calcShannonEnt(subDataSet)
        infoGain = baseEntropy - newEntropy  # the info gain, i.e. the reduction in entropy
        if infoGain > bestInfoGain:  # keep the best gain seen so far
            bestInfoGain = infoGain
            bestFeature = i
    return bestFeature  # returns the index of the best feature
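On the toy dataset this picks feature 0 ('no surfacing'): splitting on it leaves a weighted entropy of about 0.551 (gain ≈ 0.420), versus 0.800 (gain ≈ 0.171) for 'flippers':

print(chooseBestFeatureToSplit(dataSet))  # 0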
def majorityCnt(classList):
    classCount = {}
    for vote in classList:
        if vote not in classCount:
            classCount[vote] = 0
        classCount[vote] += 1
    # iteritems() is Python 2 only; items() also works in Python 3
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
def createTree(dataSet, labels):
    labels = copy.copy(labels)  # labels is passed by reference, so work on a copy (see the comment at del below)
    classList = [example[-1] for example in dataSet]
    if classList.count(classList[0]) == len(classList):
        # count() tells us how many entries match the first one; if that equals the
        # list length, every record belongs to the same class, so stop splitting
        return classList[0]
    if len(dataSet[0]) == 1:  # no features left to split on: return the majority class
        return majorityCnt(classList)
    bestFeat = chooseBestFeatureToSplit(dataSet)
    bestFeatLabel = labels[bestFeat]
    myTree = {bestFeatLabel: {}}
    # remove the label of the feature we just split on; because Python passes lists
    # by reference, without the copy above this del would also remove the element
    # from the caller's labels, and a later classify() call would fail with
    # "'no surfacing' is not in list" (hence the import copy at the top)
    del(labels[bestFeat])
    featValues = [example[bestFeat] for example in dataSet]
    uniqueVals = set(featValues)
    for value in uniqueVals:
        subLabels = labels[:]  # copy again so the recursive calls don't mutate this level's list
        myTree[bestFeatLabel][value] = createTree(splitDataSet(dataSet, bestFeat, value), subLabels)
    return myTree
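On the toy dataset the recursion yields a nested dict with 'no surfacing' at the root:

print(createTree(dataSet, labels))
# {'no surfacing': {0: 'no', 1: {'flippers': {0: 'no', 1: 'yes'}}}}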
def classify(inputTree, featLabels, testVec):
    firstStr = list(inputTree.keys())[0]
    secondDict = inputTree[firstStr]
    featIndex = featLabels.index(firstStr)  # map the feature label back to its column index
    for key in secondDict.keys():
        if testVec[featIndex] == key:
            if type(secondDict[key]).__name__ == 'dict':  # internal node: recurse
                classLabel = classify(secondDict[key], featLabels, testVec)
            else:
                classLabel = secondDict[key]  # leaf node: this is the class
    return classLabel
def storeTree(inputTree, filename):
    import pickle
    fw = open(filename, 'wb')  # pickle requires binary mode in Python 3
    pickle.dump(inputTree, fw)
    fw.close()

def grabTree(filename):
    import pickle
    fr = open(filename, 'rb')
    return pickle.load(fr)
dataSet, labels = createDataSet()
inputTree = createTree(dataSet, labels)
print(classify(inputTree, labels, [0, 0]))  # 'no'
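storeTree and grabTree let us persist the tree with pickle and reload it later; the filename here is an arbitrary choice:

storeTree(inputTree, 'classifierStorage.txt')
print(grabTree('classifierStorage.txt'))  # the same nested dict as above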