Machine Learning in Action: notes on the naive Bayes algorithm

Classifying with probability theory: naive Bayes

Naive Bayes is a classification method built on Bayes' theorem and the assumption of conditional independence between features. Given a training data set, it first learns the joint probability distribution of input and output under the conditional-independence assumption; then, for a given input x, it uses Bayes' theorem to find the output y with the largest posterior probability. Naive Bayes is simple to implement and efficient in both learning and prediction, which makes it a widely used method.
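
In symbols, the decision rule that the code below implements is

    y = \arg\max_{c_k} P(Y = c_k) \prod_{j} P(X^{(j)} = x^{(j)} \mid Y = c_k)

The conditional-independence assumption is exactly what turns the joint likelihood into this product, so each word's class-conditional probability can be estimated on its own.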

Text classification with Python

Prepare the data: building word vectors from text

def loadDataSet():
    postingList = [['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
                   ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
                   ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
                   ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
                   ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
                   ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid']]
    classVec = [0, 1, 0, 1, 0, 1]  # 1 = abusive, 0 = normal
    return postingList, classVec

def createVocabList(dataSet):
    vocabSet = set([])
    for document in dataSet:
        vocabSet = vocabSet | set(document)  # union of all words seen
    return list(vocabSet)

def setOfWords2Vec(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] = 1  # record presence only
        else:
            print "the word: %s is not in my Vocabulary!" % word
    return returnVec
listPosts, listClasses = loadDataSet()
myVocbList = createVocabList(listPosts)
myVocbList
['cute',
 'love',
 'help',
 'garbage',
 'quit',
 'I',
 'problems',
 'is',
 'park',
 'stop',
 'flea',
 'dalmation',
 'licks',
 'food',
 'not',
 'him',
 'buying',
 'posting',
 'has',
 'worthless',
 'ate',
 'to',
 'maybe',
 'please',
 'dog',
 'how',
 'stupid',
 'so',
 'take',
 'mr',
 'steak',
 'my']
setOfWords2Vec(myVocbList, listPosts[0])
[0,
 0,
 1,
 0,
 0,
 0,
 1,
 0,
 0,
 0,
 1,
 0,
 0,
 0,
 0,
 0,
 0,
 0,
 1,
 0,
 0,
 0,
 0,
 1,
 1,
 0,
 0,
 0,
 0,
 0,
 0,
 1]

Train the algorithm: computing probabilities from word vectors

import numpy as np

def trainNB0(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numwords = len(trainMatrix[0])
    pAbusive = sum(trainCategory) / float(numTrainDocs)  # prior P(class = 1)
    p0Num = np.zeros(numwords)
    p1Num = np.zeros(numwords)
    p0Denom = 0.0
    p1Denom = 0.0
    for i in xrange(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]          # per-word counts in class 1
            p1Denom += sum(trainMatrix[i])   # total words in class 1
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    p1Vect = p1Num / p1Denom  # P(word | class 1)
    p0Vect = p0Num / p0Denom  # P(word | class 0)
    return p0Vect, p1Vect, pAbusive
listPosts, listClasses = loadDataSet()
myVocbList = createVocabList(listPosts)
trainMat = []
for postinDoc in listPosts:
    trainMat.append(setOfWords2Vec(myVocbList, postinDoc))
p0V, p1V, pAb = trainNB0(trainMat, listClasses)
pAb
0.5
p0V
array([ 0.04166667,  0.04166667,  0.04166667,  0.        ,  0.        ,
        0.04166667,  0.04166667,  0.04166667,  0.        ,  0.04166667,
        0.04166667,  0.04166667,  0.04166667,  0.        ,  0.        ,
        0.08333333,  0.        ,  0.        ,  0.04166667,  0.        ,
        0.04166667,  0.04166667,  0.        ,  0.04166667,  0.04166667,
        0.04166667,  0.        ,  0.04166667,  0.        ,  0.04166667,
        0.04166667,  0.125     ])
p1V
array([ 0.        ,  0.        ,  0.        ,  0.05263158,  0.05263158,
        0.        ,  0.        ,  0.        ,  0.05263158,  0.05263158,
        0.        ,  0.        ,  0.        ,  0.05263158,  0.05263158,
        0.05263158,  0.05263158,  0.05263158,  0.        ,  0.10526316,
        0.        ,  0.05263158,  0.05263158,  0.        ,  0.10526316,
        0.        ,  0.15789474,  0.        ,  0.05263158,  0.        ,
        0.        ,  0.        ])
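
As a quick illustrative check (not in the book): given the vocabulary order shown above, the largest entry of p1V picks out the word most strongly associated with the abusive class.

print myVocbList[np.argmax(p1V)]   # 'stupid' (p = 0.158), the most indicative word for class 1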

Test the algorithm: modifying the classifier for real-world conditions

Laplace smoothing

Applying Laplace smoothing and then taking the natural logarithm (following 统计学习方法, *Statistical Learning Methods*), the code becomes:
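
For reference, with smoothing parameter \lambda the estimates become (in the notation of 统计学习方法)

    P_\lambda(Y = c_k) = \frac{\sum_{i=1}^{N} I(y_i = c_k) + \lambda}{N + K\lambda}

    P_\lambda(X^{(j)} = a_{jl} \mid Y = c_k) = \frac{\sum_{i=1}^{N} I(x_i^{(j)} = a_{jl},\, y_i = c_k) + \lambda}{\sum_{i=1}^{N} I(y_i = c_k) + S_j\lambda}

\lambda = 1 is the Laplace case. In the code below, initializing the count vectors to ones with denominators of 2.0 corresponds to \lambda = 1 with an offset of 2, and (sum(trainCategory) + 1) / (numTrainDocs + 2) is the prior formula with K = 2 classes.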

def trainNB02(trainMatrix, trainCategory):
    numTrainDocs = len(trainMatrix)
    numwords = len(trainMatrix[0])
    # smoothing the prior is not in the book; the result here is the same, but it should be included
    pAbusive = (sum(trainCategory) + 1) / float(numTrainDocs + 2)
    p0Num = np.ones(numwords)   # initialize counts to 1 ...
    p1Num = np.ones(numwords)
    p0Denom = 2.0               # ... and denominators to 2 (Laplace smoothing)
    p1Denom = 2.0
    for i in xrange(numTrainDocs):
        if trainCategory[i] == 1:
            p1Num += trainMatrix[i]
            p1Denom += sum(trainMatrix[i])
        else:
            p0Num += trainMatrix[i]
            p0Denom += sum(trainMatrix[i])
    p1Vect = np.log(p1Num / p1Denom)  # store log probabilities to avoid underflow
    p0Vect = np.log(p0Num / p0Denom)
    return p0Vect, p1Vect, pAbusive
listPosts, listClasses = loadDataSet()
myVocbList = createVocabList(listPosts)
trainMat = []
for postinDoc in listPosts:
    trainMat.append(setOfWords2Vec(myVocbList, postinDoc))
p0V, p1V, pAb = trainNB02(trainMat, listClasses)
pAb
0.5
p0V
array([-2.56494936, -2.56494936, -2.56494936, -3.25809654, -3.25809654,
       -2.56494936, -2.56494936, -2.56494936, -3.25809654, -2.56494936,
       -2.56494936, -2.56494936, -2.56494936, -3.25809654, -3.25809654,
       -2.15948425, -3.25809654, -3.25809654, -2.56494936, -3.25809654,
       -2.56494936, -2.56494936, -3.25809654, -2.56494936, -2.56494936,
       -2.56494936, -3.25809654, -2.56494936, -3.25809654, -2.56494936,
       -2.56494936, -1.87180218])
p1V
array([-3.04452244, -3.04452244, -3.04452244, -2.35137526, -2.35137526,
       -3.04452244, -3.04452244, -3.04452244, -2.35137526, -2.35137526,
       -3.04452244, -3.04452244, -3.04452244, -2.35137526, -2.35137526,
       -2.35137526, -2.35137526, -2.35137526, -3.04452244, -1.94591015,
       -3.04452244, -2.35137526, -2.35137526, -3.04452244, -1.94591015,
       -3.04452244, -1.65822808, -3.04452244, -2.35137526, -3.04452244,
       -3.04452244, -3.04452244])

The naive Bayes classification function

def classifyNB(vec2Classify, p0Vec, p1Vec, pClass1):
    # log P(c) + sum of log P(w|c): the product becomes a sum in log space
    p1 = sum(vec2Classify * p1Vec) + np.log(pClass1)
    p0 = sum(vec2Classify * p0Vec) + np.log(1 - pClass1)
    if p1 > p0:
        return 1
    else:
        return 0

def testingNB():
    listPosts, listClasses = loadDataSet()
    myVocabList = createVocabList(listPosts)
    trainMat = []
    for post in listPosts:
        trainMat.append(setOfWords2Vec(myVocabList, post))
    p0V, p1V, pAb = trainNB02(trainMat, listClasses)
    testEntry = ['love', 'my', 'dalmation']
    thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry))
    print testEntry, 'classified as:', classifyNB(thisDoc, p0V, p1V, pAb)
    testEntry = ['stupid', 'garbage']
    thisDoc = np.array(setOfWords2Vec(myVocabList, testEntry))
    print testEntry, 'classified as:', classifyNB(thisDoc, p0V, p1V, pAb)
testingNB()
['love', 'my', 'dalmation'] classified as: 0
['stupid', 'garbage'] classified as: 1
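
Working in log space matters: multiplying many small probabilities underflows to 0.0 in double precision, while summing their logs stays well-behaved. A minimal illustration (not from the book):

p = 0.01 * np.ones(200)   # 200 hypothetical word probabilities of 0.01
print p.prod()            # 0.0 -- the true product, 1e-400, underflows
print np.log(p).sum()     # -921.03... -- the log-space sum is fine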

Prepare the data: the bag-of-words document model

The model above only records whether each word appears; the bag-of-words model below counts the total number of occurrences of each word. A quick comparison follows the code.

def bagOfWord2VecMN(vocabList, inputSet):
    returnVec = [0] * len(vocabList)
    for word in inputSet:
        if word in vocabList:
            returnVec[vocabList.index(word)] += 1  # count occurrences, not just presence
    return returnVec
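
To see the difference, take a document with a repeated word (reusing myVocbList from above; the exact index depends on the set ordering, but the counts are the point):

doc = ['my', 'dog', 'ate', 'my', 'steak']
print setOfWords2Vec(myVocbList, doc)[myVocbList.index('my')]   # 1 -- presence only
print bagOfWord2VecMN(myVocbList, doc)[myVocbList.index('my')]  # 2 -- occurrence count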

Example: filtering spam email with naive Bayes

Prepare the data: tokenizing text

[s.lower() for s in re.split(r'\W+', bigString) if len(s) > 2]

Test the algorithm: cross-validation with naive Bayes

import re

def textParse(bigString):
    # \W+ rather than \W* so we never split on an empty match; keep tokens longer than 2 chars, lowercased
    return [s.lower() for s in re.split(r'\W+', bigString) if len(s) > 2]
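
A quick check on a made-up sentence:

print textParse('This book is the BEST book on Python I have ever read!')
# ['this', 'book', 'the', 'best', 'book', 'python', 'have', 'ever', 'read']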
import random

def spamTest():
    docList = []
    classList = []
    for i in xrange(1, 26):  # 25 spam and 25 ham examples
        wordList = textParse(open('email/spam/%d.txt' % i).read())
        # print wordList
        docList.append(wordList)
        classList.append(1)
        wordList = textParse(open('email/ham/%d.txt' % i).read())
        docList.append(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    trainingSet = range(50)
    testSet = []
    for i in xrange(10):  # hold out 10 random documents for testing
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del trainingSet[randIndex]
    trainMat = []
    trainClasses = []
    for docIndex in trainingSet:
        trainMat.append(setOfWords2Vec(vocabList, docList[docIndex]))
        trainClasses.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB02(trainMat, trainClasses)
    errorCount = 0
    for docIndex in testSet:
        wordVector = setOfWords2Vec(vocabList, docList[docIndex])
        if classifyNB(wordVector, p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print 'the error rate is: ', float(errorCount) / len(testSet)
spamTest()

Example: using a naive Bayes classifier to reveal regional word preferences from personal ads

Collect the data: importing RSS feeds

import feedparser
ny = feedparser.parse('http://newyork.craigslist.org/stp/index.rss')
sy = feedparser.parse('http://sfbay.craigslist.org/stp/index.rss')
len(sy['entries'])
25
def calcMostFreq(vocabList, fullText):
    freqDict = {}
    for token in vocabList:
        freqDict[token] = fullText.count(token)
    # sort tokens by frequency (the original key=lambda x: x[1] sorted by each word's second letter)
    sortedFreq = sorted(freqDict, key=freqDict.get, reverse=True)
    return sortedFreq[:30]  # the 30 most frequent words
def localWords(feed1, feed0):
    docList = []
    classList = []
    fullText = []
    # print len(feed1['entries']), len(feed0['entries'])
    minLen = min(len(feed1['entries']), len(feed0['entries']))
    for i in xrange(minLen):
        wordList = textParse(feed1['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(1)
        wordList = textParse(feed0['entries'][i]['summary'])
        docList.append(wordList)
        fullText.extend(wordList)
        classList.append(0)
    vocabList = createVocabList(docList)
    # print len(vocabList), len(fullText)
    top30Words = calcMostFreq(vocabList, fullText)
    # print top30Words
    for pairW in top30Words:
        # drop the 30 most frequent words; they are mostly stop words with no regional signal
        vocabList.remove(pairW)
    trainingSet = range(2 * minLen)
    testSet = []
    for i in xrange(20):
        randIndex = int(random.uniform(0, len(trainingSet)))
        testSet.append(trainingSet[randIndex])
        del trainingSet[randIndex]
    trainMat = []
    trainClass = []
    for docIndex in trainingSet:
        trainMat.append(bagOfWord2VecMN(vocabList, docList[docIndex]))
        trainClass.append(classList[docIndex])
    p0V, p1V, pSpam = trainNB02(trainMat, trainClass)
    errorCount = 0
    for docIndex in testSet:
        wordVector = bagOfWord2VecMN(vocabList, docList[docIndex])
        if classifyNB(wordVector, p0V, p1V, pSpam) != classList[docIndex]:
            errorCount += 1
    print "the error rate is: ", float(errorCount) / len(testSet)
    return vocabList, p0V, p1V
a, b, c = localWords(ny, sy)
the error rate is:  0.25

Analyze the data: displaying region-specific words

def getTopWords(ny, sf):
    vocabList, p0V, p1V = localWords(ny, sf)  # was localWords(ny, sy), which ignored the sf argument
    topNY = []
    topSF = []
    for i in xrange(len(p0V)):
        topNY.append((vocabList[i], p1V[i]))
        topSF.append((vocabList[i], p0V[i]))
    # sort descending so the most probable words come first (the book's reverse=True was missing)
    sortedNY = sorted(topNY, key=lambda x: x[1], reverse=True)
    sortedSF = sorted(topSF, key=lambda x: x[1], reverse=True)
    print "NY------------"
    for i in xrange(8):
        print sortedNY[i][0]
    print "SF------------"
    for i in xrange(8):
        print sortedSF[i][0]
getTopWords(ny, sy)
the error rate is:  0.15
NY------------
enjoyable
asian
hanging
shot
95748
technique
ucsc
languages
SF------------
all
focus
month
enjoyable
doujins
hanging
shot
certainly