# -*- coding: UTF-8 -*-
from numpy import *
# Toy training corpus: six tokenized posts and their class labels.
def loadataset():
    """Return (postinglist, classvec) for the six-post demo corpus.

    classvec labels: 1 marks the posts containing abusive words, 0 the rest.
    """
    postinglist = [
        ['my', 'dog', 'has', 'flea', 'problems', 'help', 'please'],
        ['maybe', 'not', 'take', 'him', 'to', 'dog', 'park', 'stupid'],
        ['my', 'dalmation', 'is', 'so', 'cute', 'I', 'love', 'him'],
        ['stop', 'posting', 'stupid', 'worthless', 'garbage'],
        ['mr', 'licks', 'ate', 'my', 'steak', 'how', 'to', 'stop', 'him'],
        ['quit', 'buying', 'worthless', 'dog', 'food', 'stupid'],
    ]
    classvec = [0, 1, 0, 1, 0, 1]
    return postinglist, classvec
# Build the vocabulary: every distinct token across all documents, as a list.
def createvocablist(dataset):
    """Return a list of the unique words appearing anywhere in *dataset*."""
    vocab = set()
    for doc in dataset:
        vocab.update(doc)
    return list(vocab)
# Set-of-words model: one binary feature per vocabulary word -- only
# presence/absence is recorded, frequency is ignored.
def setofwords2vec(vocabset, input):
    """Map the token list *input* onto a 0/1 vector aligned with *vocabset*.

    vocabset -- list of vocabulary words (output of createvocablist)
    input    -- list of tokens from one document
    Returns a list of len(vocabset) ints (1 = word present in *input*).
    Words missing from the vocabulary are reported and skipped.
    """
    # Hoist the word->position lookup out of the loop: the original called
    # list.index() per token, which is O(vocab) each time (quadratic overall).
    position = {word: i for i, word in enumerate(vocabset)}
    inputvec = [0] * len(vocabset)
    for word in input:
        if word in position:
            inputvec[position[word]] = 1
        else:
            print('the word %s is not in the vocabset' % word)
    return inputvec
# Bag-of-words model: features are occurrence COUNTS, so repeated words
# accumulate instead of saturating at 1.
def bagofwords2vec(vocabset, input):
    """Map the token list *input* onto a count vector aligned with *vocabset*.

    vocabset -- list of vocabulary words (output of createvocablist)
    input    -- list of tokens from one document
    Returns a list of len(vocabset) ints (occurrence count of each word).
    Words missing from the vocabulary are reported and skipped.
    """
    # Precompute word->position once; list.index() inside the loop made the
    # original quadratic in the vocabulary size.
    position = {word: i for i, word in enumerate(vocabset)}
    inputvec = [0] * len(vocabset)
    for word in input:
        if word in position:
            inputvec[position[word]] += 1
        else:
            print('the word %s is not in the vocabset' % word)
    return inputvec
# Train naive Bayes: return the class prior and per-word log-likelihood
# vectors (as numpy ndarrays).
def trainnb(trainmatrix, traincategory):
    """Estimate naive-Bayes parameters from a document-term matrix.

    trainmatrix   -- list of per-document word vectors (counts or 0/1)
    traincategory -- list of 0/1 labels, 1 = abusive/spam class
    Returns (p0vect, p1vect, positive):
      p0vect, p1vect -- log P(word | class) ndarrays for class 0 / class 1
      positive       -- prior probability P(class = 1)
    """
    numtraindoc = len(trainmatrix)
    numwords = len(trainmatrix[0])
    positive = sum(traincategory) / float(numtraindoc)  # prior P(class=1)
    # Laplace smoothing: word counts start at 1 ...
    p0num = ones(numwords); p1num = ones(numwords)
    # ... and the denominators start at 2.0 to match. The original started
    # them at 0.0, which breaks the smoothing and divides by zero whenever a
    # class has no training documents.
    p0sum = 2.0; p1sum = 2.0
    for i in range(numtraindoc):
        if traincategory[i] == 1:
            p1num += trainmatrix[i]  # numpy broadcast: element-wise add of the row
            p1sum += sum(trainmatrix[i])
        else:
            p0num += trainmatrix[i]
            p0sum += sum(trainmatrix[i])
    # Work in log space so products of many small probabilities don't underflow.
    p0vect = log(p0num / p0sum)
    p1vect = log(p1num / p1sum)
    return p0vect, p1vect, positive
# Apply Bayes' rule in log space and pick the more probable class.
def classifynb(vec2classify, p0vect, p1vect, positive):
    """Return 1 if the posterior favours class 1 for *vec2classify*, else 0."""
    log_post_1 = sum(vec2classify * p1vect) + log(positive)
    log_post_0 = sum(vec2classify * p0vect) + log(1 - positive)
    return 1 if log_post_1 > log_post_0 else 0
# Train on the toy corpus and classify two previously unseen token lists.
def testingnb():
    # Build one bag-of-words count vector per training post.
    listvec,classvec=loadataset()
    myvocablist=createvocablist(listvec)
    trainmat=[]
    for doc in listvec:
        trainmat.append(bagofwords2vec(myvocablist,doc)) # count vector of this post over the vocabulary
    p0vect,p1vect,positive=trainnb(trainmat,classvec)
    # Score two demo inputs and print the predicted label (1 = abusive class).
    testentry=['love','my','dalmation']
    thisdoc=array(bagofwords2vec(myvocablist,testentry))
    print testentry,'classified as:',classifynb(thisdoc,p0vect,p1vect,positive)
    testentry=['stupid','garbage']
    thisdoc=array(bagofwords2vec(myvocablist,testentry))
    print testentry,'classified as:',classifynb(thisdoc,p0vect,p1vect,positive)
# Tokenize a raw text blob into lowercase words longer than two characters.
def textparse(bigstring):
    """Split *bigstring* on runs of non-word characters and return the
    lowercased tokens with len > 2 (drops stop-word-sized fragments).
    """
    import re
    # r'\W+' (one or more) is the correct separator. The original r'\W*'
    # also matches the empty string between characters, which degrades the
    # split into single characters on modern Pythons (known MLiA errata).
    listoftokens = re.split(r'\W+', bigstring)
    return [tok.lower() for tok in listoftokens if len(tok) > 2]
# Hold-out cross-validation of naive Bayes on the ham/spam email corpus.
def spamtext():
    # Reads 1.txt..25.txt from the hard-coded ham/ and spam/ directories,
    # trains on a random 40 of the 50 emails and prints the error rate on
    # the 10 held-out ones.
    dataset=[]
    category=[] # class labels: 0 = ham, 1 = spam
    vocabset=[] # vocabulary list
    for i in range(1,26):
        a=open('/Users/enniu/Desktop/jqxx/machinelearninginaction/Ch04/email/ham/%d.txt' %i).read()
        dataset.append(textparse(a))
        category.append(0)
    for i in range(1,26):
        b=open('/Users/enniu/Desktop/jqxx/machinelearninginaction/Ch04/email/spam/%d.txt' %i).read()
        dataset.append(textparse(b))
        category.append(1)
    vocabset=createvocablist(dataset) # vocabulary over the whole corpus
    # Randomly pick 10 of the 50 document indices as the test set; working
    # with indices avoids copying or mutating the parsed documents.
    testset=[] # indices of held-out test documents
    trainset=range(50) # indices of training documents
    # NOTE(review): `del trainset[randindex]` needs a list -- this works on
    # Python 2 where range() returns one, but fails on Python 3. Confirm
    # this module targets Python 2 (the print statements suggest so).
    for i in range(10):
        randindex=int(random.uniform(0,len(trainset)))
        testset.append(trainset[randindex])
        del trainset[randindex]
    # Train on the remaining 40 samples: [P(x1|spam),P(x2|spam),P(x3|spam),...]
    trainmat=[]
    trainclass=[]
    for i in trainset:
        trainmat.append(setofwords2vec(vocabset,dataset[i]))
        trainclass.append(category[i])
    p0vect,p1vect,positive=trainnb(trainmat,trainclass)
    #return p0vect,p1vect,positive
    # Score the held-out documents and report the misclassification rate.
    errorcount=0
    for i in testset:
        wordvect=setofwords2vec(vocabset,dataset[i])
        precategory=classifynb(wordvect,p0vect,p1vect,positive)
        if precategory!=category[i]:
            errorcount+=1
    print 'the error rate is: ',float(errorcount)/len(testset)
if __name__=='__main__':
    # Run the spam cross-validation eleven times; each run draws a fresh
    # random train/test split, so repeated runs show the spread of error rates.
    for _trial in range(11):
        spamtext()
# -*- coding: UTF-8 -*-
from numpy import *
# Demo data: six tokenized posts with their 0/1 class labels (1 = abusive).
def loadataset():
    """Return (postinglist, classvec) for the six-post demo corpus."""
    docs = (
        'my dog has flea problems help please',
        'maybe not take him to dog park stupid',
        'my dalmation is so cute I love him',
        'stop posting stupid worthless garbage',
        'mr licks ate my steak how to stop him',
        'quit buying worthless dog food stupid',
    )
    postinglist = [doc.split() for doc in docs]
    classvec = [0, 1, 0, 1, 0, 1]
    return postinglist, classvec
# Flatten the corpus into its set of distinct words, returned as a list.
def createvocablist(dataset):
    """Return the list of unique tokens across all documents in *dataset*."""
    return list(set().union(*dataset))
# Set-of-words model: records only whether each vocabulary word occurs in
# the document (1) or not (0); occurrence frequency is ignored.
def setofwords2vec(vocabset, input):
    """Return a 0/1 presence vector for the tokens *input* over *vocabset*.

    Unknown words are reported and skipped.
    """
    # Build the word->slot map once; the original's list.index() per token
    # scanned the vocabulary list every iteration (quadratic behavior).
    slot = {w: i for i, w in enumerate(vocabset)}
    vec = [0] * len(vocabset)
    for word in input:
        if word in slot:
            vec[slot[word]] = 1
        else:
            print('the word %s is not in the vocabset' % word)
    return vec
# Bag-of-words model: counts every occurrence of each vocabulary word.
def bagofwords2vec(vocabset, input):
    """Return an occurrence-count vector for the tokens *input* over *vocabset*.

    Unknown words are reported and skipped.
    """
    # Hoisted word->slot dict replaces the per-token list.index() scan,
    # turning an accidental O(vocab * tokens) loop into O(vocab + tokens).
    slot = {w: i for i, w in enumerate(vocabset)}
    vec = [0] * len(vocabset)
    for word in input:
        if word in slot:
            vec[slot[word]] += 1
        else:
            print('the word %s is not in the vocabset' % word)
    return vec
# Train naive Bayes: class prior plus per-word log-likelihoods (ndarrays).
def trainnb(trainmatrix, traincategory):
    """Estimate naive-Bayes parameters from a document-term matrix.

    trainmatrix   -- list of per-document word vectors (counts or 0/1)
    traincategory -- list of 0/1 labels, 1 = abusive/spam class
    Returns (p0vect, p1vect, positive) where the vects are
    log P(word | class) ndarrays and positive is the prior P(class = 1).
    """
    numtraindoc = len(trainmatrix)
    numwords = len(trainmatrix[0])
    positive = sum(traincategory) / float(numtraindoc)  # prior P(class=1)
    # Laplace smoothing: numerators start at 1 and denominators at 2.0.
    # The original initialized the denominators to 0.0, which undoes the
    # smoothing and divides by zero when a class has no documents.
    p0num = ones(numwords); p1num = ones(numwords)
    p0sum = 2.0; p1sum = 2.0
    for docvec, label in zip(trainmatrix, traincategory):
        if label == 1:
            p1num += docvec  # ndarray broadcast adds the row element-wise
            p1sum += sum(docvec)
        else:
            p0num += docvec
            p0sum += sum(docvec)
    # Log space keeps later products of tiny probabilities from underflowing.
    p0vect = log(p0num / p0sum)
    p1vect = log(p1num / p1sum)
    return p0vect, p1vect, positive
# Bayes decision rule: compare the two class log-posteriors.
def classifynb(vec2classify, p0vect, p1vect, positive):
    """Return 1 when class 1 has the larger posterior for *vec2classify*, else 0."""
    score1 = sum(vec2classify * p1vect) + log(positive)
    score0 = sum(vec2classify * p0vect) + log(1 - positive)
    return int(score1 > score0)
# Train on the toy corpus, then classify two unseen token lists and print the results.
def testingnb():
    # Assemble the training matrix: one bag-of-words count vector per post.
    listvec,classvec=loadataset()
    myvocablist=createvocablist(listvec)
    trainmat=[]
    for doc in listvec:
        trainmat.append(bagofwords2vec(myvocablist,doc)) # count vector of this post over the vocabulary
    p0vect,p1vect,positive=trainnb(trainmat,classvec)
    # Predict labels for two demo entries (1 = abusive class) and print them.
    testentry=['love','my','dalmation']
    thisdoc=array(bagofwords2vec(myvocablist,testentry))
    print testentry,'classified as:',classifynb(thisdoc,p0vect,p1vect,positive)
    testentry=['stupid','garbage']
    thisdoc=array(bagofwords2vec(myvocablist,testentry))
    print testentry,'classified as:',classifynb(thisdoc,p0vect,p1vect,positive)
# Script entry point: run the toy-corpus demo when executed directly.
if __name__=='__main__':
    testingnb()