Implementing the C4.5 Decision Tree Algorithm in Python


The C4.5 algorithm selects features using the information gain ratio instead of ID3's information gain, which overcomes information gain's bias toward features with many distinct values. The information gain ratio is defined as follows:
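\[
\mathrm{GainRatio}(D, A) = \frac{\mathrm{Gain}(D, A)}{\mathrm{SplitInfo}_A(D)}, \qquad
\mathrm{SplitInfo}_A(D) = -\sum_{v=1}^{V} \frac{|D_v|}{|D|} \log_2 \frac{|D_v|}{|D|}
\]

where Gain(D, A) = H(D) - H(D|A) is the ordinary ID3 information gain and D_v is the subset of samples in D for which feature A takes its v-th value. The split-information term grows with the number of distinct values of A, which is what penalizes many-valued features. The implementation below selects, at each node, the feature with the largest gain ratio.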

# -*- coding: utf-8 -*-


from numpy import *
import math
import copy
import cPickle as pickle


class C45DTree(object):
 def __init__(self): # constructor
  self.tree = {} # the decision tree
  self.dataSet = [] # data set
  self.labels = [] # feature labels


 # Load the data set from a tab-delimited text file
 def loadDataSet(self, path, labels):
  recordList = []
  fp = open(path, "rb") # read the file contents
  content = fp.read()
  fp.close()
  rowList = content.splitlines() # split the content into rows
  recordList = [row.split("\t") for row in rowList if row.strip()] # skip blank lines, split each row on tabs
  self.dataSet = recordList
  self.labels = labels


 # Train: build the decision tree from the loaded data set
 def train(self):
  labels = copy.deepcopy(self.labels)
  self.tree = self.buildTree(self.dataSet, labels)


 # Build the decision tree: main recursive routine
 def buildTree(self, dataSet, labels):
  cateList = [data[-1] for data in dataSet] # extract the class-label column from the data set
  # Stopping condition 1: all samples share the same class label -- stop splitting and return it
  if cateList.count(cateList[0]) == len(cateList):
   return cateList[0]
  # Stopping condition 2: no features left (only the class column remains) -- return the majority class
  if len(dataSet[0]) == 1:
   return self.maxCate(cateList)
  # Core part
  bestFeat, featValueList = self.getBestFeat(dataSet) # index of the best feature axis and its values
  bestFeatLabel = labels[bestFeat]
  tree = {bestFeatLabel: {}}
  del (labels[bestFeat])
  for value in featValueList: # grow the tree recursively
   subLabels = labels[:] # label list with the chosen feature removed, for the subtree
   # split the data set on the best feature and this value
   splitDataset = self.splitDataSet(dataSet, bestFeat, value)
   subTree = self.buildTree(splitDataset, subLabels) # build the subtree
   tree[bestFeatLabel][value] = subTree
  return tree


 # Return the class label that occurs most often
 def maxCate(self, cateList):
  items = dict([(cateList.count(i), i) for i in cateList]) # map occurrence count -> label
  return items[max(items.keys())] # label with the highest count


 # Find the best feature to split on (largest information gain ratio)
 def getBestFeat(self, dataSet):
  Num_Feats = len(dataSet[0][:-1])
  totality = len(dataSet)
  BaseEntropy = self.computeEntropy(dataSet)
  ConditionEntropy = [] # conditional entropy of each feature
  splitInfo = [] # split information of each feature, needed for the C4.5 gain ratio
  allFeatVList = []
  for f in xrange(Num_Feats):
   featList = [example[f] for example in dataSet]
   [splitI, featureValueList] = self.computeSplitInfo(featList)
   allFeatVList.append(featureValueList)
   splitInfo.append(splitI)
   resultGain = 0.0
   for value in featureValueList:
    subSet = self.splitDataSet(dataSet, f, value)
    appearNum = float(len(subSet))
    subEntropy = self.computeEntropy(subSet)
    resultGain += (appearNum/totality)*subEntropy
   ConditionEntropy.append(resultGain) # total conditional entropy of this feature
  infoGainArray = BaseEntropy*ones(Num_Feats)-array(ConditionEntropy) # information gain of each feature
  infoGainRatio = infoGainArray/array(splitInfo) # C4.5: gain ratio = gain / split information
  bestFeatureIndex = argsort(-infoGainRatio)[0] # index of the feature with the largest gain ratio
  return bestFeatureIndex, allFeatVList[bestFeatureIndex]

 # Compute the split information of one feature column
 def computeSplitInfo(self, featureVList):
  numEntries = len(featureVList)
  featureValueSetList = list(set(featureVList)) # distinct values of the feature
  valueCounts = [featureVList.count(featVec) for featVec in featureValueSetList]
  pList = [float(item)/numEntries for item in valueCounts] # proportion of each value
  lList = [item*math.log(item, 2) for item in pList]
  splitInfo = -sum(lList) # SplitInfo = -sum(p * log2(p))
  return splitInfo, featureValueSetList


 # Compute the Shannon entropy of the data set
 def computeEntropy(self, dataSet):
  dataLen = float(len(dataSet))
  cateList = [data[-1] for data in dataSet] # class labels of the data set
  # dict mapping each class label to its number of occurrences
  items = dict([(i, cateList.count(i)) for i in cateList])
  infoEntropy = 0.0
  for key in items: # Shannon entropy: H = -sum(p * log2(p))
   prob = float(items[key]) / dataLen
   infoEntropy -= prob * math.log(prob, 2)
  return infoEntropy


 # Split the data set: keep the rows whose feature axis equals value,
 # and drop that feature column from the returned rows
 # dataSet: data set; axis: feature index; value: feature value to match
 def splitDataSet(self, dataSet, axis, value):
  rtnList = []
  for featVec in dataSet:
   if featVec[axis] == value:
    rFeatVec = featVec[:axis] # list slice: elements 0 .. axis-1
    rFeatVec.extend(featVec[axis + 1:]) # append the elements after the feature axis
    rtnList.append(rFeatVec)
  return rtnList

 # Persist the tree to a file with pickle
 def storetree(self, inputTree, filename):
  fw = open(filename, 'wb')
  pickle.dump(inputTree, fw)
  fw.close()

 # Load a previously stored tree from a file
 def grabTree(self, filename):
  fr = open(filename, 'rb')
  tree = pickle.load(fr)
  fr.close()
  return tree
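
Before the file-based driver below, the helper methods can be sanity-checked interactively on a tiny hand-made data set (the rows here are invented for illustration and are not the article's dataset.dat; two features plus a class column):

# Toy data: [age, student, class]; values are made up for this quick check only.
toy = [["youth", "no", "refuse"],
       ["youth", "yes", "agree"],
       ["senior", "yes", "agree"],
       ["senior", "no", "refuse"]]
t = C45DTree()
print t.computeEntropy(toy)                    # 1.0 -- the classes split 2/2
print t.computeSplitInfo([r[0] for r in toy])  # (1.0, [...]) -- two equally frequent values
print t.getBestFeat(toy)                       # picks feature 1, which separates the classes perfectly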

Calling code

# -*- coding: utf-8 -*-

from numpy import *
from C45DTree import *

dtree = C45DTree()
dtree.loadDataSet("dataset.dat", ["age", "revenue", "student", "credit"])
dtree.train()

dtree.storetree(dtree.tree, "data.tree")
mytree = dtree.grabTree("data.tree")
print mytree
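
The resulting tree is just a nested dict: the outer key is the chosen feature label, the next level's keys are that feature's values, and the leaves are class labels. The article stops at building and storing the tree; as a purely illustrative sketch (this helper is not part of the original code), one could walk that structure to classify a new sample like this:

# Hypothetical helper, not from the original article: traverse the nested-dict
# tree to classify a single sample, given the label list used for training.
def classify(tree, labels, sample):
 if not isinstance(tree, dict): # reached a leaf, which is a class label
  return tree
 featLabel = tree.keys()[0] # feature tested at this node
 featIndex = labels.index(featLabel) # column of that feature in the sample
 subTree = tree[featLabel].get(sample[featIndex]) # branch matching the sample's value, if any
 return classify(subTree, labels, sample) if subTree is not None else None

# Example call (the feature values here are placeholders):
# print classify(mytree, ["age", "revenue", "student", "credit"], ["youth", "high", "no", "fair"])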

That's all for this article. I hope it helps with your learning, and thank you for supporting 听图阁-专注于Python设计.
