#!/usr/bin/env python
# coding: utf-8

# ***
# ***
# # 文本挖掘简介
# ***
# ***
#
# 王成军
#
# wangchengjun@nju.edu.cn
#
# 计算传播网 http://computational-communication.com

# # What can be learned from 5 million books
#
# http://v.youku.com/v_show/id_XMzA3OTA5MjUy.html
#
# This talk by Jean-Baptiste Michel and Erez Lieberman Aiden is phenomenal.
#
# Michel, J.-B., et al. (2011). Quantitative Analysis of Culture Using Millions of Digitized Books. Science, 331, 176–182.

# ![](./img/books.jpg)

# 试一下谷歌图书的数据: https://books.google.com/ngrams/
#
# 数据下载: http://www.culturomics.org/home

# # Bag-of-words model (BOW)
#
# Represent text as numerical feature vectors:
#
# - We create a vocabulary of unique tokens—for example, words—from the entire set of documents.
# - We construct a feature vector from each document that contains the counts of how often each word occurs in that particular document.
#
# Since the unique words in each document represent only a small subset of all the words in the bag-of-words vocabulary, the feature vectors consist mostly of zeros, which is why we call them sparse.

# Bag of words,也叫做“词袋”。在信息检索中,bag-of-words 模型假定对于一个文本,忽略其词序、语法和句法,将其仅仅看作一个词的集合(或者说词的组合);文本中每个词的出现都是独立的,不依赖于其他词是否出现,或者说,作者在文本的任意位置选择词汇都不受前面句子的影响。这种假设虽然对自然语言做了简化,但便于建模。
#
# 这一假定在有些情况下是不合理的。例如在新闻个性化推荐中,采用 bag of words 模型就会出现问题:用户甲对“南京醉酒驾车事故”这个短语很感兴趣,若采用 bag of words 忽略词序和句法,则认为用户甲对“南京”、“醉酒”、“驾车”和“事故”分别感兴趣,因此可能推荐出和“南京”、“公交车”、“事故”相关的新闻,这显然是不合理的。
#
# 解决的方法可以采用 SCPCD 的方法抽取出整个短语,或者采用高阶(2 阶以上)统计语言模型,例如 bigram、trigram,将词序保留下来,相当于 bag of bigrams 和 bag of trigrams,这样能在一定程度上解决这类问题。简言之,bag of words 模型是否适用需要根据实际情况来确定;对于那些不可以忽视词序、语法和句法的场合,均不能采用 bag of words 的方法。

# # Transforming words into feature vectors
#
# A document-term matrix (or term-document matrix) is a mathematical matrix that describes the frequency of terms that occur in a collection of documents.
#
# In a document-term matrix, rows correspond to documents in the collection and columns correspond to terms.
#
# There are various schemes for determining the value that each entry in the matrix should take. One such scheme is tf-idf. Such weighting schemes are useful in the field of natural language processing.

# D1 = "I like databases"
#
# D2 = "I hate databases"
#
# |    | I | like | hate | databases |
# |----|:-:|:----:|:----:|----------:|
# | D1 | 1 | 1    | 0    | 1         |
# | D2 | 1 | 0    | 1    | 1         |

# In[1]:

import numpy as np
from sklearn.feature_extraction.text import CountVectorizer

count = CountVectorizer()
docs = np.array([
    'The sun is shining',
    'The weather is sweet',
    'The sun is shining and the weather is sweet'])
bag = count.fit_transform(docs)

# In[2]:

' '.join(dir(count))

# In[2]:

count.get_feature_names()

# In[3]:

print(count.vocabulary_)

# In[4]:

type(bag)

# In[5]:

print(bag.toarray())

# In[6]:

import pandas as pd
pd.DataFrame(bag.toarray(), columns=count.get_feature_names())

# # 1-gram
#
# The sequence of items in the bag-of-words model that we just created is also called the 1-gram or unigram model:
#
# - each item or token in the vocabulary represents a single word.

# # n-gram
#
# The choice of the number n in the n-gram model depends on the particular application.
#
# - 1-gram: "the", "sun", "is", "shining"
# - 2-gram: "the sun", "sun is", "is shining"
#
# The CountVectorizer class in scikit-learn allows us to use different n-gram models via its `ngram_range` parameter.
#
# While a 1-gram representation is used by default, we could switch to a 2-gram representation by initializing a new CountVectorizer instance with ngram_range=(2,2).
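# 下面给出一个最小示例(示意),沿用上面的 docs,演示通过 `ngram_range=(2, 2)` 切换到 2-gram 表示:

# In[ ]:

from sklearn.feature_extraction.text import CountVectorizer

# ngram_range=(2, 2) 表示只抽取 2-gram(相邻两个词的组合)作为特征
count_2gram = CountVectorizer(ngram_range=(2, 2))
bag_2gram = count_2gram.fit_transform(docs)
print(count_2gram.get_feature_names())  # 例如 'the sun', 'sun is', 'is shining', ...
print(bag_2gram.toarray())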
# # Assessing word relevancy via term frequency-inverse document frequency
#
# $$\text{tf-idf}(t, d) = \text{tf}(t, d) \times \text{idf}(t)$$
#
# ## $\text{tf}(t, d)$ is the term frequency of term t in document d.
#
# ## The inverse document frequency $\text{idf}(t)$ can be calculated as:
#
# $$\text{idf}(t) = \log \frac{n_d}{1 + \text{df}(d, t)}$$
#
# [SKlearn use_idf=True, smooth_idf=True](http://scikit-learn.org/stable/modules/generated/sklearn.feature_extraction.text.TfidfTransformer.html#sklearn.feature_extraction.text.TfidfTransformer)
#
# $$\text{idf}(t) = \log \frac{n_d}{\text{df}(d, t) + 1} + 1$$
#
# ## where $n_d$ is the total number of documents, and $\text{df}(d, t)$ is the number of documents $d$ that contain the term $t$.

# 提问: Why do we add the constant 1 to the denominator?
#
# ![](./img/ask.jpeg)

# 课堂作业:请根据公式计算'is'这个词在文本2中的tfidf数值?
#
# ![](./img/ask.jpeg)

# # TfidfTransformer
#
# Scikit-learn implements yet another transformer, the TfidfTransformer, which takes the raw term frequencies from CountVectorizer as input and transforms them into tf-idfs:

# In[7]:

from sklearn.feature_extraction.text import TfidfTransformer

np.set_printoptions(precision=2)
tfidf = TfidfTransformer(use_idf=True, norm='l2', smooth_idf=True)
print(tfidf.fit_transform(count.fit_transform(docs)).toarray())

# In[16]:

from sklearn.feature_extraction.text import TfidfTransformer

np.set_printoptions(precision=2)
tfidf = TfidfTransformer(use_idf=True, norm=None, smooth_idf=True)
print(tfidf.fit_transform(count.fit_transform(docs)).toarray())

# In[17]:

import pandas as pd

bag = tfidf.fit_transform(count.fit_transform(docs))
pd.DataFrame(bag.toarray(), columns=count.get_feature_names())

# In[19]:

# 一个词的tfidf值
import numpy as np

tf_is = 2.0
n_docs = 3.0
# idf_is = np.log(n_docs / 3)
idf_is = np.log(n_docs / 3) + 1
tfidf_is = tf_is * idf_is
print('tf-idf of term "is" = %.2f' % tfidf_is)

# In[20]:

# 最后一个文本里的词的tfidf原始数值(未标准化)
tfidf = TfidfTransformer(use_idf=True, norm=None, smooth_idf=False)
raw_tfidf = tfidf.fit_transform(count.fit_transform(docs)).toarray()[-1]
raw_tfidf, count.get_feature_names()

# # The tf-idf equation that was implemented in scikit-learn is as follows:
#
# $$\text{tf-idf}(t, d) = \text{tf}(t, d) \times (\text{idf}(t, d) + 1)$$

# # L2-normalization
#
# $$\ell_2(x) = \frac{x}{\sqrt{\sum_i x_i^2}}$$

# In[21]:

# l2标准化后的tfidf数值
l2_tfidf = raw_tfidf / np.sqrt(np.sum(raw_tfidf**2))
l2_tfidf
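# CountVectorizer 与 TfidfTransformer 也可以用 TfidfVectorizer 一步完成。下面是一个最小示例(示意),参数与上面 In[7] 的设置保持一致:

# In[ ]:

from sklearn.feature_extraction.text import TfidfVectorizer

# TfidfVectorizer 相当于 CountVectorizer + TfidfTransformer
tfidf_vec = TfidfVectorizer(use_idf=True, norm='l2', smooth_idf=True)
print(tfidf_vec.fit_transform(docs).toarray())  # 结果应与 In[7] 的输出一致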
# # 政府工作报告文本挖掘

# ## 0. 读取数据

# In[124]:

with open('../data/gov_reports1954-2017.txt', 'r', encoding='utf-8') as f:
    reports = f.readlines()

# In[125]:

len(reports)

# In[126]:

reports[4]

# In[73]:

print(reports[0][:1000])

# # pip install jieba
# > https://github.com/fxsjy/jieba

# # pip install wordcloud
# > https://github.com/amueller/word_cloud

# # pip install gensim

# # 在terminal里成功安装第三方的包,结果发现在notebook里无法import
# > 这个问题多出现于mac用户,因为mac有一个系统自带的python,成功安装的第三方包都被安装到了系统自带的python里。因此需要确保我们使用的是conda自己的pip,即需要指定pip的路径名,比如我的pip路径名在:/Users/chengjun/anaconda/bin/pip,那么在terminal里输入:
#
#     /Users/chengjun/anaconda/bin/pip install package_name

# In[74]:

get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.cm as cm
import matplotlib.pyplot as plt
import sys
import numpy as np
from collections import defaultdict
import statsmodels.api as sm
from wordcloud import WordCloud
import jieba
import matplotlib
import gensim
from gensim import corpora, models, similarities
from gensim.utils import simple_preprocess
from gensim.parsing.preprocessing import STOPWORDS

# matplotlib.rcParams['font.sans-serif'] = ['Microsoft YaHei']  # 指定默认字体
matplotlib.rc("savefig", dpi=400)

# In[75]:

# 为了确保中文可以在matplotlib里正确显示
# matplotlib.rcParams['font.sans-serif'] = ['Microsoft YaHei']  # 指定默认字体
# 需要确定系统安装了Microsoft YaHei

# In[76]:

# import matplotlib
# my_font = matplotlib.font_manager.FontProperties(
#     fname='/Users/chengjun/github/cjc/data/msyh.ttf')

# # 1. 分词

# In[77]:

import jieba

seg_list = jieba.cut("我来到北京清华大学", cut_all=True)
print("Full Mode: " + "/ ".join(seg_list))  # 全模式

seg_list = jieba.cut("我来到北京清华大学", cut_all=False)
print("Default Mode: " + "/ ".join(seg_list))  # 精确模式

seg_list = jieba.cut("他来到了网易杭研大厦")  # 默认是精确模式
print(", ".join(seg_list))

seg_list = jieba.cut_for_search("小明硕士毕业于中国科学院计算所,后在日本京都大学深造")  # 搜索引擎模式
print(", ".join(seg_list))

# ## 2. 停用词

# In[78]:

filename = '../data/stopwords.txt'
stopwords = {}
with open(filename, 'r', encoding='utf-8') as f:
    for line in f:
        word = line.rstrip()
        if word:
            stopwords[word] = 1

# In[28]:

adding_stopwords = [u'我们', u'要', u'地', u'有', u'这', u'人',
                    u'发展', u'建设', u'加强', u'继续', u'对', u'等',
                    u'推进', u'工作', u'增加']
for s in adding_stopwords:
    stopwords[s] = 10
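# 上面 In[75]/In[76] 两个被注释掉的单元提示了中文字体的两种配置方式。下面给出一个最小示意(假设系统装有 Microsoft YaHei,或本地存在 ../data/msyh.ttf 字体文件,路径沿用本文档后面词云部分的设定):

# In[ ]:

import matplotlib
import matplotlib.pyplot as plt

# 方式一:全局指定默认中文字体(需要系统已安装该字体)
matplotlib.rcParams['font.sans-serif'] = ['Microsoft YaHei']
matplotlib.rcParams['axes.unicode_minus'] = False  # 避免坐标轴负号显示为方块

# 方式二:加载本地字体文件,在绘图时通过 fontproperties 传入
my_font = matplotlib.font_manager.FontProperties(fname='../data/msyh.ttf')
plt.title(u'中文标题示例', fontproperties=my_font)
plt.show()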
# ## 3. 关键词抽取

# ### 基于TF-IDF 算法的关键词抽取

# In[79]:

import jieba.analyse

txt = reports[-1]
tf = jieba.analyse.extract_tags(txt, topK=200, withWeight=True)

# In[80]:

u"、".join([i[0] for i in tf[:50]])

# In[81]:

plt.hist([i[1] for i in tf])
plt.show()

# ### 基于 TextRank 算法的关键词抽取

# In[82]:

tr = jieba.analyse.textrank(txt, topK=200, withWeight=True)
u"、".join([i[0] for i in tr[:50]])

# In[83]:

plt.hist([i[1] for i in tr])
plt.show()

# In[84]:

import pandas as pd

def keywords(index):
    txt = reports[-index]
    tf = jieba.analyse.extract_tags(txt, topK=200, withWeight=True)
    tr = jieba.analyse.textrank(txt, topK=200, withWeight=True)
    tfdata = pd.DataFrame(tf, columns=['word', 'tfidf'])
    trdata = pd.DataFrame(tr, columns=['word', 'textrank'])
    worddata = pd.merge(tfdata, trdata, on='word')
    fig = plt.figure(figsize=(16, 6), facecolor='white')
    plt.plot(worddata.tfidf, worddata.textrank, linestyle='', marker='.')
    for i in range(len(worddata.word)):
        plt.text(worddata.tfidf[i], worddata.textrank[i], worddata.word[i],
                 fontsize=worddata.textrank[i]*30, color='red', rotation=0)
    plt.title(txt[:4])
    plt.xlabel('Tf-Idf')
    plt.ylabel('TextRank')
    plt.show()

# In[85]:

keywords(1)

# In[86]:

keywords(2)

# In[87]:

keywords(3)

# #### 算法论文:
#
# TextRank: Bringing Order into Texts
#
# ### 基本思想:
#
# * 将待抽取关键词的文本进行分词
# * 以固定窗口大小(默认为5,通过span属性调整),根据词之间的共现关系构建图
# * 计算图中节点的PageRank,注意是无向带权图
#
# (下文词云小节之后附有一个基于共现图与 PageRank 的最小示意。)

# ## 4. 词云

# In[88]:

def wordcloudplot(txt, year):
    wordcloud = WordCloud(font_path='../data/msyh.ttf').generate(txt)
    # Open a plot of the generated image.
    fig = plt.figure(figsize=(16, 6), facecolor='white')
    plt.imshow(wordcloud)
    plt.title(year)
    plt.axis("off")
    # plt.show()

# #### 基于tfidf过滤的词云

# In[23]:

txt = reports[-1]
tfidf200 = jieba.analyse.extract_tags(txt, topK=200, withWeight=False)
seg_list = jieba.cut(txt, cut_all=False)
seg_list = [i for i in seg_list if i in tfidf200]
txt200 = r' '.join(seg_list)
wordcloudplot(txt200, txt[:4])

# In[24]:

txt = reports[-2]
tfidf200 = jieba.analyse.extract_tags(txt, topK=200, withWeight=False)
seg_list = jieba.cut(txt, cut_all=False)
seg_list = [i for i in seg_list if i in tfidf200]
txt200 = r' '.join(seg_list)
wordcloudplot(txt200, txt[:4])

# In[326]:

txt = reports[-2]
tfidf200 = jieba.analyse.extract_tags(txt, topK=200, withWeight=False)
seg_list = jieba.cut(txt, cut_all=False)
seg_list = [i for i in seg_list if i in tfidf200]
txt200 = r' '.join(seg_list)
wordcloudplot(txt200, txt[:4])

# In[59]:

wordfreq = defaultdict(int)
for i in seg_list:
    wordfreq[i] += 1
wordfreq = [[i, wordfreq[i]] for i in wordfreq]
wordfreq.sort(key=lambda x: x[1], reverse=True)
u"、 ".join([i[0] + u'(' + str(i[1]) + u')' for i in wordfreq])

# #### 基于停用词过滤的词云

# In[70]:

# jieba.add_word('股灾', freq=100, tag=None)
txt = reports[-1]
seg_list = jieba.cut(txt, cut_all=False)
seg_list = [i for i in seg_list if i not in stopwords]
txt = r' '.join(seg_list)
wordcloudplot(txt, txt[:4])
# file_path = '/Users/chengjun/GitHub/cjc2016/figures/wordcloud-' + txt[:4] + '.png'
# plt.savefig(file_path, dpi=300, bbox_inches="tight", transparent=True)

# ### 绘制1954-2016政府工作报告词云

# In[113]:

# jieba.add_word('股灾', freq=100, tag=None)
for txt in reports:
    seg_list = jieba.cut(txt, cut_all=False)
    seg_list = [i for i in seg_list if i not in stopwords]
    txt = r' '.join(seg_list)
    wordcloudplot(txt, txt[:4])
    file_path = '../figure/wordcloud-' + txt[:4] + '.png'
    plt.savefig(file_path, dpi=400, bbox_inches="tight", transparent=True)
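# 下面给出 TextRank 基本思想的一个最小示意(并非 jieba 的内部实现):先分词并过滤停用词,在固定窗口内统计词的共现关系构成无向带权图,再用 networkx 的 PageRank 计算词的重要性。其中窗口大小 window=5、函数名 textrank_demo 均为示例性设定,需要先 pip install networkx:

# In[ ]:

from collections import defaultdict

import jieba
import networkx as nx

def textrank_demo(txt, stopwords, window=5, topK=20):
    # 1. 分词并过滤停用词和单字
    words = [w for w in jieba.cut(txt) if w not in stopwords and len(w) > 1]
    # 2. 在固定窗口内统计共现次数,作为无向图的边权
    cooccur = defaultdict(int)
    for i in range(len(words)):
        for j in range(i + 1, min(i + window, len(words))):
            if words[i] != words[j]:
                cooccur[tuple(sorted((words[i], words[j])))] += 1
    g = nx.Graph()
    for (w1, w2), weight in cooccur.items():
        g.add_edge(w1, w2, weight=weight)
    # 3. 在无向带权图上计算 PageRank,返回权重最高的 topK 个词
    pr = nx.pagerank(g, weight='weight')
    return sorted(pr.items(), key=lambda x: x[1], reverse=True)[:topK]

# 用法示例(沿用上面的 reports 与 stopwords):
# textrank_demo(reports[-1], stopwords)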
# ## 5. 词向量的时间序列

# In[89]:

reports[0][:500]

# In[90]:

reports[1][:500]

# In[27]:

test = jieba.analyse.textrank(reports[0], topK=200, withWeight=False)

# In[57]:

test = jieba.analyse.extract_tags(reports[1], topK=200, withWeight=False)

# In[59]:

help(jieba.analyse.extract_tags)

# In[127]:

import jieba.analyse

wordset = []
for k, txt in enumerate(reports):
    print(k)
    top20 = jieba.analyse.extract_tags(txt, topK=200, withWeight=False)
    for w in top20:
        if w not in wordset:
            wordset.append(w)

# In[128]:

len(wordset)

# In[129]:

print(' '.join(wordset))

# In[38]:

from collections import defaultdict

data = defaultdict(dict)
years = [int(i[:4]) for i in reports]
for i in wordset:
    for year in years:
        data[i][year] = 0

# In[39]:

for txt in reports:
    year = int(txt[:4])
    top1000 = jieba.analyse.textrank(txt, topK=1000, withWeight=True)
    for ww in top1000:
        word, weight = ww
        if word in wordset:
            data[word][year] += weight

# In[45]:

word_weight = []
for i in data:
    word_weight.append([i, np.sum(list(data[i].values()))])

# In[46]:

word_weight[:2]

# In[47]:

word_weight.sort(key=lambda x: x[1], reverse=True)
top50 = [i[0] for i in word_weight[:50]]

# In[48]:

' '.join(top50)

# In[52]:

def plotEvolution(word, color, linestyle, marker):
    cx = data[word]
    plt.plot(list(cx.keys()), list(cx.values()), color=color,
             linestyle=linestyle, marker=marker, label=word)
    plt.legend(loc=2, fontsize=8)
    plt.ylabel(u'词语重要性')

# In[53]:

plotEvolution(u'民主', 'g', '-', '>')
plotEvolution(u'法制', 'b', '-', 's')

# In[363]:

plotEvolution(u'动能', 'b', '-', 's')
plotEvolution(u'互联网', 'g', '-', '>')

# In[364]:

plotEvolution(u'工业', 'y', '-', '<')
plotEvolution(u'农业', 'r', '-', 'o')
plotEvolution(u'制造业', 'b', '-', 's')
plotEvolution(u'服务业', 'g', '-', '>')

# In[362]:

plotEvolution(u'教育', 'r', '-', 'o')
plotEvolution(u'社会保障', 'b', '-', 's')
plotEvolution(u'医疗', 'g', '-', '>')

# In[356]:

plotEvolution(u'环境', 'b', '-', 's')
plotEvolution(u'住房', 'purple', '-', 'o')

# In[357]:

plotEvolution(u'发展', 'y', '-', '<')
plotEvolution(u'经济', 'r', '-', 'o')
plotEvolution(u'改革', 'b', '-', 's')
plotEvolution(u'创新', 'g', '-', '>')

# In[359]:

plotEvolution(u'社会主义', 'r', '-', 'o')
plotEvolution(u'马克思主义', 'b', '-', 's')

# In[208]:

fig = plt.figure(figsize=(12, 4), facecolor='white')
cmap = cm.get_cmap('rainbow_r', 5)
for k, word in enumerate(top50[:5]):
    years = list(data[word].keys())[-40:]
    tfidfs = list(data[word].values())[-40:]
    plt.plot(years, tfidfs, color=cmap(k), linestyle='-', marker='.', label=word)
plt.legend(loc=1, fontsize=8)
plt.show()

# In[207]:

fig = plt.figure(figsize=(12, 4), facecolor='white')
cmap = cm.get_cmap('rainbow_r', 5)
for k, word in enumerate(top50[5:10]):
    years = list(data[word].keys())[-40:]
    tfidfs = list(data[word].values())[-40:]
    plt.plot(years, tfidfs, color=cmap(k), linestyle='-', marker='.', label=word)
plt.legend(loc=1, fontsize=8)
plt.show()

# In[206]:

fig = plt.figure(figsize=(12, 4), facecolor='white')
cmap = cm.get_cmap('rainbow_r', 5)
for k, word in enumerate(top50[10:15]):
    years = list(data[word].keys())[-40:]
    tfidfs = list(data[word].values())[-40:]
    plt.plot(years, tfidfs, color=cmap(k), linestyle='-', marker='.', label=word)
plt.legend(loc=1, fontsize=8)
plt.show()

# In[205]:

fig = plt.figure(figsize=(12, 4), facecolor='white')
cmap = cm.get_cmap('rainbow_r', 5)
for k, word in enumerate(top50[15:20]):
    years = list(data[word].keys())[-40:]
    tfidfs = list(data[word].values())[-40:]
    plt.plot(years, tfidfs, color=cmap(k), linestyle='-', marker='.', label=word)
plt.legend(loc=1, fontsize=8)
plt.show()

# In[204]:

fig = plt.figure(figsize=(12, 4), facecolor='white')
cmap = cm.get_cmap('rainbow_r', 5)
for k, word in enumerate(top50[20:25]):
    years = list(data[word].keys())[-40:]
    tfidfs = list(data[word].values())[-40:]
    plt.plot(years, tfidfs, color=cmap(k), linestyle='-', marker='.', label=word)
plt.legend(loc=1, fontsize=8)
plt.show()

# In[202]:

fig = plt.figure(figsize=(12, 4), facecolor='white')
cmap = cm.get_cmap('rainbow_r', 5)
for k, word in enumerate(top50[25:30]):
    years = list(data[word].keys())[-30:]
    tfidfs = list(data[word].values())[-30:]
    plt.plot(years, tfidfs, color=cmap(k), linestyle='-', marker='.', label=word)
plt.legend(loc=1, fontsize=8)
plt.show()
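# 上面 In[208] 到 In[202] 几个单元结构完全相同,只是词的切片不同。下面是一个可选的辅助函数(函数名 plot_top_words 为示例性命名),把它们合并为一次调用:

# In[ ]:

def plot_top_words(words, last_n=40):
    """按年份绘制一组词的 TextRank 权重演化(示例性辅助函数)。"""
    fig = plt.figure(figsize=(12, 4), facecolor='white')
    cmap = cm.get_cmap('rainbow_r', len(words))
    for k, word in enumerate(words):
        years = list(data[word].keys())[-last_n:]
        tfidfs = list(data[word].values())[-last_n:]
        plt.plot(years, tfidfs, color=cmap(k), linestyle='-', marker='.', label=word)
    plt.legend(loc=1, fontsize=8)
    plt.show()

# 用法示例:plot_top_words(top50[:5]) 与上面的 In[208] 等价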
# # kmeans 聚类

# In[209]:

from sklearn import metrics
from sklearn.metrics import pairwise_distances

dataX = []
wordX = []
for word in top50:
    dataX.append(list(data[word].values())[-40:])
    wordX.append(word)
dataX = np.array(dataX)

# In[210]:

dataX

# In[211]:

import numpy as np
from sklearn.cluster import KMeans

silhouette_score = []
for cluster_num in range(2, 10):
    kmeans_model = KMeans(n_clusters=cluster_num, random_state=1).fit(dataX)
    labels = kmeans_model.labels_
    sscore = metrics.silhouette_score(dataX, labels, metric='euclidean')
    silhouette_score.append(sscore)

fig = plt.figure(figsize=(4, 2), facecolor='white')
plt.plot(range(2, 10), silhouette_score)
plt.xlabel('# Clusters')
plt.ylabel('Silhouette Score')
plt.show()

# The score is bounded between -1 for incorrect clustering and +1 for highly dense clustering. Scores around zero indicate overlapping clusters.
#
# The score is higher when clusters are dense and well separated, which relates to a standard concept of a cluster.

# In[212]:

kmeans_model = KMeans(n_clusters=2, random_state=1).fit(dataX)
labels = kmeans_model.labels_
labels

# In[213]:

print(' '.join(wordX))

# In[214]:

print('\t'.join([wordX[index]
                 for index in np.where(labels == 0)[0]]))

# In[215]:

word_cluster1 = [wordX[index] for index in np.where(labels == 0)[0]]

fig = plt.figure(figsize=(12, 4), facecolor='white')
cmap = cm.get_cmap('rainbow_r', 10)
for k, word in enumerate(word_cluster1[:10]):
    years = list(data[word].keys())[-30:]
    tfidfs = list(data[word].values())[-30:]
    plt.plot(years, tfidfs, color=cmap(k), linestyle='-', marker='.', label=word)
plt.legend(loc=1, fontsize=8)
plt.show()

# In[216]:

print('\t'.join([wordX[index] for index in np.where(labels == 1)[0]]))

# In[217]:

word_cluster2 = [wordX[index] for index in np.where(labels == 1)[0]]

fig = plt.figure(figsize=(12, 4), facecolor='white')
cmap = cm.get_cmap('rainbow_r', 10)
for k, word in enumerate(word_cluster2[:10]):
    years = list(data[word].keys())[-30:]
    tfidfs = list(data[word].values())[-30:]
    plt.plot(years, tfidfs, color=cmap(k), linestyle='-', marker='.', label=word)
plt.legend(loc=1, fontsize=8)
plt.show()

# # 词性标注

# In[137]:

import jieba.posseg as pseg

words = pseg.cut("我爱北京天安门")
words = [str(w).split('/') for w in words]
for i in words:
    print(i[0], i[1])

# ## ICTCLAS 汉语词性标注集
#
# 汉语文本词性标注标记集
#
# Ag 形语素 形容词性语素。形容词代码为a,语素代码g前面置以A。
#
# a 形容词 取英语形容词adjective的第1个字母。
#
# ad 副形词 直接作状语的形容词。形容词代码a和副词代码d并在一起。
#
# an 名形词 具有名词功能的形容词。形容词代码a和名词代码n并在一起。
#
# b 区别词 取汉字“别”的声母。
#
# c 连词 取英语连词conjunction的第1个字母。
#
# Dg 副语素 副词性语素。副词代码为d,语素代码g前面置以D。
#
# d 副词 取adverb的第2个字母,因其第1个字母已用于形容词。
#
# e 叹词 取英语叹词exclamation的第1个字母。
#
# f 方位词 取汉字“方”
#
# g 语素 绝大多数语素都能作为合成词的“词根”,取汉字“根”的声母。
#
# h 前接成分 取英语head的第1个字母。
#
# i 成语 取英语成语idiom的第1个字母。
#
# j 简称略语 取汉字“简”的声母。
#
# k 后接成分
#
# l 习用语 习用语尚未成为成语,有点“临时性”,取“临”的声母。
#
# m 数词 取英语numeral的第3个字母,n,u已有他用。
#
# Ng 名语素 名词性语素。名词代码为n,语素代码g前面置以N。
#
# n 名词 取英语名词noun的第1个字母。
#
# nr 人名 名词代码n和“人(ren)”的声母并在一起。
#
# ns 地名 名词代码n和处所词代码s并在一起。
#
# nt 机构团体 “团”的声母为t,名词代码n和t并在一起。
#
# nz 其他专名 “专”的声母的第1个字母为z,名词代码n和z并在一起。
# o 拟声词 取英语拟声词onomatopoeia的第1个字母。
#
# p 介词 取英语介词prepositional的第1个字母。
#
# q 量词 取英语quantity的第1个字母。
#
# r 代词 取英语代词pronoun的第2个字母,因p已用于介词。
#
# s 处所词 取英语space的第1个字母。
#
# Tg 时语素 时间词性语素。时间词代码为t,在语素的代码g前面置以T。
#
# t 时间词 取英语time的第1个字母。
#
# u 助词 取英语助词auxiliary
#
# Vg 动语素 动词性语素。动词代码为v。在语素的代码g前面置以V。
#
# v 动词 取英语动词verb的第一个字母。
#
# vd 副动词 直接作状语的动词。动词和副词的代码并在一起。
#
# vn 名动词 指具有名词功能的动词。动词和名词的代码并在一起。
#
# w 标点符号
#
# x 非语素字 非语素字只是一个符号,字母x通常用于代表未知数、符号。
#
# y 语气词 取汉字“语”的声母。
#
# z 状态词 取汉字“状”的声母的前一个字母。
#
# -------------------------------------------------------------------------------
#
# a: 形容词
# b: 区别词
# c: 连词
# d: 副词
# e: 叹词
# g: 语素字
# h: 前接成分
# i: 习用语
# j: 简称
# k: 后接成分
# m: 数词
# n: 普通名词
# nd: 方位名词
# nh: 人名
# ni: 机构名
# nl: 处所名词
# ns: 地名
# nt: 时间词
# nz: 其他专名
# o: 拟声词
# p: 介词
# q: 量词
# r: 代词
# u: 助词
# v: 动词
# wp: 标点符号
# ws: 字符串
# x: 非语素字
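# 结合上面的词性标注,做关键词抽取时常常只保留名词、动词等实词。下面是一个最小示意:既可以用 pseg.cut 自己按词性过滤,也可以利用 jieba.analyse 的 allowPOS 参数直接限定词性:

# In[ ]:

import jieba.posseg as pseg
import jieba.analyse

# 方式一:用 pseg.cut 自行按词性过滤(这里只保留名词 n* 和动词 v*)
words = [(w.word, w.flag) for w in pseg.cut(reports[-1])]
content_words = [w for w, flag in words if flag.startswith('n') or flag.startswith('v')]
print(content_words[:20])

# 方式二:extract_tags / textrank 的 allowPOS 参数可以直接限定抽取的词性
print(jieba.analyse.extract_tags(reports[-1], topK=20, allowPOS=('n', 'ns', 'vn', 'v')))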
# # 主题模型

# In[230]:

def getCorpus(data):
    # tokenize 需自行定义,例如可用前面导入的 simple_preprocess
    processed_docs = [tokenize(doc) for doc in data]
    word_count_dict = gensim.corpora.Dictionary(processed_docs)
    print("In the corpus there are", len(word_count_dict), "unique tokens")
    word_count_dict.filter_extremes(no_below=5, no_above=0.2)
    # word must appear >5 times, and no more than 20% documents
    print("After filtering, in the corpus there are only", len(word_count_dict), "unique tokens")
    bag_of_words_corpus = [word_count_dict.doc2bow(pdoc) for pdoc in processed_docs]
    return bag_of_words_corpus, word_count_dict

def cleancntxt(txt, stopwords):
    tfidf1000 = jieba.analyse.extract_tags(txt, topK=1000, withWeight=False)
    seg_generator = jieba.cut(txt, cut_all=False)
    seg_list = [i for i in seg_generator if i not in stopwords]
    seg_list = [i for i in seg_list if i != u' ']
    seg_list = [i for i in seg_list if i in tfidf1000]
    return seg_list

def getCnCorpus(data):
    processed_docs = [cleancntxt(doc, stopwords) for doc in data]
    word_count_dict = gensim.corpora.Dictionary(processed_docs)
    print("In the corpus there are", len(word_count_dict), "unique tokens")
    # word_count_dict.filter_extremes(no_below=5, no_above=0.2)
    # word must appear >5 times, and no more than 20% documents
    print("After filtering, in the corpus there are only", len(word_count_dict), "unique tokens")
    bag_of_words_corpus = [word_count_dict.doc2bow(pdoc) for pdoc in processed_docs]
    return bag_of_words_corpus, word_count_dict

def inferTopicNumber(bag_of_words_corpus, num, word_count_dict):
    lda_model = gensim.models.LdaModel(bag_of_words_corpus, num_topics=num,
                                       id2word=word_count_dict, passes=10)
    _ = lda_model.print_topics(-1)  # use _ for throwaway variables
    logperplexity = lda_model.log_perplexity(bag_of_words_corpus)
    return logperplexity

def ppnumplot(topicnum, logperplexity):
    # 做主题数与困惑度的折线图
    plt.plot(topicnum, logperplexity, color="red", linewidth=2)
    plt.xlabel("Number of Topic")
    plt.ylabel("Perplexity")
    plt.show()

# 定义一些常用的函数

def flushPrint(variable):
    if variable % 100 == 0:  # 每隔 100 次刷新一次输出(原代码中的 %10^2 是按位异或,并非 10 的平方)
        sys.stdout.write('\r')
        sys.stdout.write('%s' % variable)
        sys.stdout.flush()

def top(data):
    for i in data:
        print(i)

def freq(data):
    dtable = defaultdict(int)
    for i in data:
        dtable[i] += 1
    return dtable

def sortdict(data):
    '''data is a dict, sorted by value'''
    return sorted(data.items(), key=lambda x: x[1], reverse=True)

# ## 对2016年政府工作报告建立主题模型

# In[118]:

import sys
from urllib.request import urlopen
from bs4 import BeautifulSoup

url2016 = 'http://news.xinhuanet.com/fortune/2016-03/05/c_128775704.htm'
content = urlopen(url2016).read()
soup = BeautifulSoup(content, 'html.parser')

# In[231]:

gov_report_2016 = [s.text for s in soup('p')]
for i in gov_report_2016[:10]:
    print(i)

# In[232]:

def clean_txt(txt):
    for i in [u'、', u',', u'—', u'!', u'。', u'《', u'》', u'(', u')']:
        txt = txt.replace(i, ' ')
    return txt

# In[233]:

gov_report_2016 = [clean_txt(i) for i in gov_report_2016]

# In[234]:

for i in gov_report_2016[:10]:
    print(i)

# In[227]:

len(gov_report_2016[5:-1])

# In[243]:

jieba.add_word(u'屠呦呦', freq=None, tag=None)
# del_word(word)
' '.join(cleancntxt(u'屠呦呦获得了诺贝尔医学奖。', stopwords))

# In[244]:

processed_docs = [cleancntxt(doc, stopwords) for doc in gov_report_2016[5:-1]]
word_count_dict = gensim.corpora.Dictionary(processed_docs)
print("In the corpus there are", len(word_count_dict), "unique tokens")
# word_count_dict.filter_extremes(no_below=5, no_above=0.2)
# word must appear >5 times, and no more than 20% documents
# print("After filtering, in the corpus there are only", len(word_count_dict), "unique tokens")
bag_of_words_corpus = [word_count_dict.doc2bow(pdoc) for pdoc in processed_docs]

# In[245]:

tfidf = models.TfidfModel(bag_of_words_corpus)
corpus_tfidf = tfidf[bag_of_words_corpus]
lda_model = gensim.models.LdaModel(corpus_tfidf, num_topics=20,
                                   id2word=word_count_dict, passes=10)
# lda_model = gensim.models.LdaMulticore(corpus_tfidf, num_topics=10, id2word=word_count_dict, passes=10)

# In[246]:

perplexity_list = [inferTopicNumber(bag_of_words_corpus, num, word_count_dict)
                   for num in [5, 15, 20, 25, 30, 35, 40]]

# In[247]:

plt.plot([5, 15, 20, 25, 30, 35, 40], perplexity_list)

# In[252]:

topictermlist = lda_model.print_topics(-1)
top_words = [[j.split('*')[1] for j in i.split(' + ')] for i in topictermlist]
for i in top_words:
    print(" ".join(i) + '\n')

# In[249]:

top_words_shares = [[j.split('*')[0] for j in i.split(' + ')] for i in topictermlist]
top_words_shares = [list(map(float, i)) for i in top_words_shares]

def weightvalue(x):
    return (x - np.min(top_words_shares)) * 40 / (np.max(top_words_shares) - np.min(top_words_shares)) + 10

top_words_shares = [list(map(weightvalue, i)) for i in top_words_shares]

def plotTopics(mintopics, maxtopics):
    num_top_words = 10
    plt.rcParams['figure.figsize'] = (10.0, 4.0)
    n = 0
    for t in range(mintopics, maxtopics):
        plt.subplot(2, 15, n + 1)  # plot numbering starts with 1
        plt.ylim(0, num_top_words)  # stretch the y-axis to accommodate the words
        plt.xticks([])  # remove x-axis markings ('ticks')
        plt.yticks([])  # remove y-axis markings ('ticks')
        plt.title(u'主题 #{}'.format(t + 1), size=5)
        words = top_words[t][0:num_top_words]
        words_shares = top_words_shares[t][0:num_top_words]
        for i, (word, share) in enumerate(zip(words, words_shares)):
            plt.text(0.05, num_top_words - i - 0.9, word,
                     fontsize=np.log(share * 10))
        n += 1
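# 上面 In[252] 和 In[249] 通过解析 print_topics 返回的字符串来获得词和权重。较新版本的 gensim 也可以用 show_topics(formatted=False) 直接返回 (词, 权重) 对,无需字符串解析,下面是一个最小示意(具体返回格式依 gensim 版本而定):

# In[ ]:

# formatted=False 时返回 [(topic_id, [(word, weight), ...]), ...]
for topic_id, word_weights in lda_model.show_topics(num_topics=-1, num_words=10, formatted=False):
    print(topic_id, u'、'.join(w for w, _ in word_weights))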
# In[250]:

plotTopics(0, 10)

# In[251]:

plotTopics(10, 20)

# In[ ]:
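# 训练好 lda_model 之后,还可以查看每个段落(文档)的主题分布。下面是一个最小示意,沿用上面的 corpus_tfidf、word_count_dict、tfidf 与 cleancntxt;其中 new_doc 只是一个示例句子:

# In[ ]:

# 语料中第一个段落的主题分布:[(主题编号, 概率), ...]
print(lda_model[corpus_tfidf[0]])

# 对一段新文本推断主题分布:先分词、转 bow,再经 tfidf 变换后送入模型
new_doc = u'加快推进供给侧结构性改革'
new_bow = word_count_dict.doc2bow(cleancntxt(new_doc, stopwords))
print(lda_model[tfidf[new_bow]])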