Chinese word segmentation in Python 2.7, done by importing the jieba Chinese segmentation library and then running a simple word-frequency count.
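One point worth noting before the full script: jieba.analyse.extract_tags does not return every token, it returns the top keywords ranked by TF-IDF (20 by default), whereas jieba.cut gives the full segmentation. A minimal sketch of the difference (the sample sentence is made up for illustration):

# -*- coding: utf-8 -*-
import jieba
import jieba.analyse

text = u"我爱自然语言处理"  # made-up sample sentence for illustration
print "/".join(jieba.cut(text))                   # full segmentation: every token
print "/".join(jieba.analyse.extract_tags(text))  # top TF-IDF keywords only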
import sys
reload(sys)
sys.setdefaultencoding('utf-8')  # Python 2 hack so utf-8 strings can be written directly

import jieba
import jieba.analyse
import xlwt  # library for writing Excel spreadsheets

if __name__ == "__main__":
    wbk = xlwt.Workbook(encoding='ascii')
    sheet = wbk.add_sheet("wordcount")  # name of the worksheet
    word_lst = []
    key_list = []
    for line in open('test.txt'):  # test.txt is the document to segment and count
        item = line.strip('\n\r').split('\t')  # split on tabs
        # print item
        tags = jieba.analyse.extract_tags(item[0])  # jieba keyword extraction
        for t in tags:
            word_lst.append(t)

    word_dict = {}
    with open("wordcount.txt", 'w') as wf2:  # open the output file
        for item in word_lst:
            if item not in word_dict:  # count occurrences
                word_dict[item] = 1
            else:
                word_dict[item] += 1

        orderlist = list(word_dict.values())
        orderlist.sort(reverse=True)
        # print orderlist
        for i in range(len(orderlist)):
            for key in word_dict:
                if word_dict[key] == orderlist[i]:
                    wf2.write(key + ' ' + str(word_dict[key]) + '\n')  # write to the txt file
                    key_list.append(key)
                    word_dict[key] = 0  # zero out so the same word is not written again

    for i in range(len(key_list)):
        sheet.write(i, 1, label=orderlist[i])
        sheet.write(i, 0, label=key_list[i])
    wbk.save('wordcount.xls')  # save as wordcount.xls
Running the script produces the segmentation results in wordcount.txt and wordcount.xls.
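For reference, the counting and the nested sort-and-scan above (which rescans word_dict once per rank, so it is O(n²) in the number of distinct words) can be shortened with collections.Counter from the standard library. A sketch under the same assumptions (Python 2.7, tab-separated records in test.txt):

from collections import Counter

import jieba.analyse

word_lst = []
for line in open('test.txt'):
    # same input convention as above: take the first tab-separated field
    tags = jieba.analyse.extract_tags(line.strip('\n\r').split('\t')[0])
    word_lst.extend(tags)

counts = Counter(word_lst)
with open("wordcount.txt", 'w') as wf2:
    # most_common() yields (word, count) pairs sorted by descending count
    for word, cnt in counts.most_common():
        wf2.write(word.encode('utf-8') + ' ' + str(cnt) + '\n')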