【Python Learning Series 11】Implementing the C4.5 Decision Tree (Information Gain Ratio) in Python
C4.5 is a classification decision tree algorithm that improves on ID3. Its key difference is that C4.5 selects the splitting attribute by information gain ratio, whereas ID3 uses information gain based on entropy (an impurity measure). C4.5 can also handle continuous (non-discrete) attributes and incomplete data.
- 1. Information entropy: H(D) = -Σ_k p_k · log2(p_k), where p_k is the proportion of samples in D belonging to class k.
- 2. Conditional entropy: H(D|A) = Σ_j (|D_j|/|D|) · H(D_j), where D_j is the subset of D in which attribute A takes its j-th value.
- 3. Information gain: g(D,A) = H(D) - H(D|A)
- 4. Information gain ratio: g_R(D,A) = g(D,A) / H_A(D), where H_A(D) is the entropy of attribute A's own value distribution (the split information). A small worked example follows below.
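To make the formulas concrete, here is a minimal standalone sketch (not from the original article; the toy feature column `A` and class column `labels` are made up) that computes the information gain and gain ratio of a single feature:

```python
import math

def entropy(values):
    # Shannon entropy of a sequence of discrete values: -sum(p * log2(p))
    n = float(len(values))
    return -sum((values.count(v) / n) * math.log(values.count(v) / n, 2)
                for v in set(values))

# toy feature column A and class column (made-up data)
A      = ["0", "0", "1", "1", "1"]
labels = ["N", "N", "Y", "Y", "N"]

h_d = entropy(labels)                       # H(D)       ~ 0.971
# H(D|A): entropy of each subset D_j, weighted by |D_j|/|D|
h_da = sum((A.count(v) / float(len(A))) *
           entropy([c for a, c in zip(A, labels) if a == v])
           for v in set(A))                 # H(D|A)     ~ 0.551
gain = h_d - h_da                           # g(D,A)     ~ 0.420
split_info = entropy(A)                     # H_A(D)     ~ 0.971
print(gain / split_info)                    # gain ratio ~ 0.433
```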
1) C45DTree.py
```python
# -*- coding: utf-8 -*-
from numpy import *
import math
import copy
import pickle


class C45DTree(object):
    def __init__(self):
        self.tree = {}        # the generated decision tree
        self.dataSet = []     # data set
        self.labels = []      # feature label set

    # Load a comma-separated data set; the last column is the class label
    def loadDataSet(self, path, labels):
        with open(path, "r") as fp:
            content = fp.read()
        rowList = content.splitlines()  # one record per line
        # split each row on commas and strip spaces/tabs from every field
        self.dataSet = [[field.strip() for field in row.split(",")]
                        for row in rowList if row.strip()]
        self.labels = labels

    # Train the decision tree
    def train(self):
        labels = copy.deepcopy(self.labels)
        self.tree = self.buildTree(self.dataSet, labels)

    # Main routine: build the decision tree recursively
    def buildTree(self, dataSet, labels):
        cateList = [data[-1] for data in dataSet]  # class-label column
        # Stop condition 1: every sample has the same class label
        if cateList.count(cateList[0]) == len(cateList):
            return cateList[0]
        # Stop condition 2: no features left, return the majority label
        if len(dataSet[0]) == 1:
            return self.maxCate(cateList)
        # Core: choose the best feature axis by information gain ratio
        bestFeat, featValueList = self.getBestFeat(dataSet)
        bestFeatLabel = labels[bestFeat]
        tree = {bestFeatLabel: {}}
        del labels[bestFeat]
        for value in featValueList:     # grow the tree recursively
            subLabels = labels[:]       # label set without the chosen feature
            # split the data set on the best feature axis and current value
            splitDataset = self.splitDataSet(dataSet, bestFeat, value)
            subTree = self.buildTree(splitDataset, subLabels)
            tree[bestFeatLabel][value] = subTree
        return tree

    # Return the most frequent class label
    def maxCate(self, cateList):
        # map count -> label; the label with the largest count wins
        items = dict([(cateList.count(i), i) for i in cateList])
        return items[max(items.keys())]

    # Find the best feature: the one with the highest information gain ratio
    def getBestFeat(self, dataSet):
        Num_Feats = len(dataSet[0]) - 1
        totality = len(dataSet)
        BaseEntropy = self.computeEntropy(dataSet)
        ConditionEntropy = []   # conditional entropy of each feature
        splitInfo = []          # split information, for the C4.5 gain ratio
        allFeatVList = []
        for f in range(Num_Feats):
            featList = [example[f] for example in dataSet]
            splitI, featureValueList = self.computeSplitInfo(featList)
            allFeatVList.append(featureValueList)
            splitInfo.append(splitI)
            resultGain = 0.0
            for value in featureValueList:
                subSet = self.splitDataSet(dataSet, f, value)
                appearNum = float(len(subSet))
                subEntropy = self.computeEntropy(subSet)
                resultGain += (appearNum / totality) * subEntropy
            ConditionEntropy.append(resultGain)  # total conditional entropy
        infoGainArray = BaseEntropy * ones(Num_Feats) - array(ConditionEntropy)
        infoGainRatio = infoGainArray / array(splitInfo)  # C4.5 gain ratio
        bestFeatureIndex = argsort(-infoGainRatio)[0]
        return bestFeatureIndex, allFeatVList[bestFeatureIndex]

    # Compute the split information H_A(D) of one feature column
    def computeSplitInfo(self, featureVList):
        numEntries = len(featureVList)
        featureValueSetList = list(set(featureVList))
        valueCounts = [featureVList.count(v) for v in featureValueSetList]
        pList = [float(item) / numEntries for item in valueCounts]
        lList = [item * math.log(item, 2) for item in pList]
        splitInfo = -sum(lList)
        return splitInfo, featureValueSetList

    # Compute the Shannon entropy of a data set
    def computeEntropy(self, dataSet):
        dataLen = float(len(dataSet))
        cateList = [data[-1] for data in dataSet]  # class labels
        # dict of class label -> occurrence count
        items = dict([(i, cateList.count(i)) for i in cateList])
        infoEntropy = 0.0
        for key in items:  # Shannon entropy: -sum(p * log2(p))
            prob = float(items[key]) / dataLen
            infoEntropy -= prob * math.log(prob, 2)
        return infoEntropy

    # Split the data set: keep rows whose value on the feature axis matches,
    # then drop that feature column and return the remaining data
    # dataSet: data set; axis: feature axis; value: value on the feature axis
    def splitDataSet(self, dataSet, axis, value):
        rtnList = []
        for featVec in dataSet:
            if featVec[axis] == value:
                rFeatVec = featVec[:axis]            # elements 0..axis-1
                rFeatVec.extend(featVec[axis + 1:])  # append the rest
                rtnList.append(rFeatVec)
        return rtnList

    # Store the tree to a file
    def storetree(self, inputTree, filename):
        with open(filename, "wb") as fw:
            pickle.dump(inputTree, fw)

    # Load the tree back from a file
    def grabTree(self, filename):
        with open(filename, "rb") as fr:
            return pickle.load(fr)
```
2) C45DTreeDemo.py

```python
# -*- coding: utf-8 -*-
from C45DTree import *

dtree = C45DTree()
dtree.loadDataSet(r"D:\dataset.dat",
                  ["outlook", "temperature", "humidity", "windy"])
dtree.train()
dtree.storetree(dtree.tree, "data.tree")  # persist the trained tree
mytree = dtree.grabTree("data.tree")      # load it back
print(mytree)
```
3) Test data (dataset.dat) and execution result:

```
0, 0, 0, 0, N
0, 0, 0, 1, N
1, 0, 0, 0, Y
2, 1, 0, 0, Y
2, 2, 1, 0, Y
2, 2, 1, 1, N
1, 2, 1, 1, Y
```

Output:

```
{'windy': {'0': {'outlook': {'1': 'Y', '0': 'N', '2': 'Y'}}, '1': {'outlook': {'1': 'Y', '0': 'N', '2': 'N'}}}}
```
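Since the stored tree is just a nested dict, prediction is a recursive lookup. The `classify` helper below is not part of the original article; it is a minimal sketch assuming the same label list and string-valued features as the demo:

```python
def classify(tree, featLabels, sample):
    # descend until we hit a leaf (a plain class label instead of a dict);
    # an unseen feature value would raise a KeyError
    if not isinstance(tree, dict):
        return tree
    feat = next(iter(tree))                  # feature tested at this node
    value = sample[featLabels.index(feat)]   # the sample's value for it
    return classify(tree[feat][value], featLabels, sample)

labels = ["outlook", "temperature", "humidity", "windy"]
print(classify(mytree, labels, ["2", "2", "1", "0"]))  # -> 'Y'
```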