#!/usr/bin/env python
# coding: utf-8

# # Topic Modeling

# ## Introduction

# Another popular text analysis technique is called topic modeling. The ultimate goal of topic modeling is to find the various topics that are present in your corpus. Each document in the corpus is modeled as a mixture of one or more topics.
#
# In this notebook, we will be covering the steps on how to do **Latent Dirichlet Allocation (LDA)**, which is one of many topic modeling techniques. It was specifically designed for text data.
#
# To use a topic modeling technique, you need to provide (1) a document-term matrix and (2) the number of topics you would like the algorithm to pick up.
#
# Once the topic modeling technique is applied, your job as a human is to interpret the results and see if the mix of words in each topic makes sense. If they don't make sense, you can try changing the number of topics, the terms in the document-term matrix, the model parameters, or even a different model.

# ## Topic Modeling - Attempt #1 (All Text)

# In[1]:


# Let's read in our document-term matrix
import pandas as pd
import pickle

data = pd.read_pickle('dtm_stop.pkl')
data


# In[2]:


# # Uncomment to set up LDA logging to a file
# import logging
# logging.basicConfig(filename='lda_model.log', format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)

# Import the necessary modules for LDA with gensim
# Terminal / Anaconda Navigator: conda install -c conda-forge gensim
from gensim import matutils, models
import scipy.sparse  # gensim requires a sparse matrix format


# In[3]:


# One of the required inputs is a term-document matrix (the transpose of the document-term matrix)
tdm = data.transpose()
tdm.head()


# In[4]:


# We're going to put the term-document matrix into a new gensim format: df --> sparse matrix --> gensim corpus
sparse_counts = scipy.sparse.csr_matrix(tdm)
corpus = matutils.Sparse2Corpus(sparse_counts)


# In[5]:


# Gensim also requires a dictionary of all the terms and their respective locations in the term-document matrix
cv = pickle.load(open("cv_stop.pkl", "rb"))  # the CountVectorizer that created the document-term matrix
id2word = dict((v, k) for k, v in cv.vocabulary_.items())


# Now that we have the corpus (term-document matrix) and id2word (dictionary of location: term), we're ready to train the LDA model. We need to specify two other parameters - the number of topics and the number of training passes. Let's start the number of topics at 2, see if the results make sense, and increase the number from there.
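# Before training, it can help to sanity-check the two gensim inputs. Below is a minimal sketch (added for illustration) that reuses the corpus and id2word objects built above: each corpus entry is one document as a list of (term_id, count) pairs, and id2word maps each id back to its term.

# In[ ]:


# Peek at the first document in the gensim corpus and decode a few term ids
first_doc = next(iter(corpus))
print(first_doc[:5])                           # e.g. [(0, 2), (3, 1), ...]
print([id2word[i] for i, _ in first_doc[:5]])  # the corresponding terms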
# In[6]:


# Note: gensim refers to this object as a corpus; we call it the term-document matrix
# passes is the number of times the algorithm sweeps over the whole corpus
import numpy as np

lda = models.LdaModel(corpus=corpus, id2word=id2word, num_topics=2, passes=10,
                      random_state=np.random.RandomState(seed=10))
for topic, topwords in lda.show_topics():
    print("Topic", topic, "\n", topwords, "\n")


# **Increment the number of topics to see if it improves**

# In[7]:


# LDA for num_topics = 3
lda = models.LdaModel(corpus=corpus, id2word=id2word, num_topics=3, passes=10,
                      random_state=np.random.RandomState(seed=10))
for topic, topwords in lda.show_topics():
    print("Topic", topic, "\n", topwords, "\n")


# **Increment the number of topics again**

# In[8]:


# LDA for num_topics = 4
lda = models.LdaModel(corpus=corpus, id2word=id2word, num_topics=4, passes=10)
for topic, topwords in lda.show_topics():
    print("Topic", topic, "\n", topwords, "\n")


# These topics aren't looking too meaningful, and there's a lot of overlap between them. We've tried modifying our parameters; let's try modifying our terms list as well.

# ## Topic Modeling - Attempt #2 (Nouns Only)

# One popular trick is to look only at terms from a single part of speech (only nouns, only adjectives, etc.). Check out the UPenn tag set: https://www.ling.upenn.edu/courses/Fall_2003/ling001/penn_treebank_pos.html.
#
# For the second attempt, let's look at nouns only. The tag for nouns is NN.

# In[9]:


# Let's create a function to pull out nouns from a string of text
from nltk import word_tokenize, pos_tag

def nouns(text):
    '''Given a string of text, tokenize the text and pull out only the nouns.'''
    is_noun = lambda pos: pos[:2] == 'NN'  # pos = part-of-speech tag
    tokenized = word_tokenize(text)
    all_nouns = [word for (word, pos) in pos_tag(tokenized) if is_noun(pos)]
    return ' '.join(all_nouns)


# In[10]:


# Read in the cleaned data, before the CountVectorizer step
data_clean = pd.read_pickle('data_clean.pkl')


# In[11]:


# Apply the nouns function to the transcripts to filter on nouns only
data_nouns = pd.DataFrame(data_clean.transcript.apply(nouns))
data_nouns


# In[12]:


# Create a new document-term matrix using only nouns
from sklearn.feature_extraction import text
from sklearn.feature_extraction.text import CountVectorizer

# Re-add the additional stop words since we are recreating the document-term matrix
add_stop_words = ['like', 'im', 'know', 'just', 'dont', 'thats', 'right', 'people',
                  'youre', 'got', 'gonna', 'time', 'think', 'yeah', 'said']
stop_words = text.ENGLISH_STOP_WORDS.union(add_stop_words)

# Recreate a document-term matrix with only nouns
cv_nouns = CountVectorizer(stop_words=stop_words)
data_cv_nouns = cv_nouns.fit_transform(data_nouns.transcript)
data_dtm_nouns = pd.DataFrame(data_cv_nouns.toarray(), columns=cv_nouns.get_feature_names_out())
data_dtm_nouns.index = data_nouns.index
data_dtm_nouns


# In[13]:


# Create the gensim corpus - this time with nouns only
corpus_nouns = matutils.Sparse2Corpus(scipy.sparse.csr_matrix(data_dtm_nouns.transpose()))

# Create the vocabulary dictionary of all terms and their respective locations
id2word_nouns = dict((v, k) for k, v in cv_nouns.vocabulary_.items())


# In[14]:


# Let's start with 2 topics
lda_nouns = models.LdaModel(corpus=corpus_nouns, num_topics=2, id2word=id2word_nouns, passes=10)
lda_nouns.print_topics()


# In[15]:


# Let's try 3 topics
lda_nouns = models.LdaModel(corpus=corpus_nouns, num_topics=3, id2word=id2word_nouns, passes=10)
lda_nouns.print_topics()
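# The strings returned by print_topics() can be hard to scan. As a readability aid (a small sketch, not part of the original analysis), the top terms of each topic can also be pulled out as plain word lists with show_topic:

# In[ ]:


# Print each topic of the current nouns-only model as a simple list of its top 10 words
for t in range(lda_nouns.num_topics):
    top_words = [word for word, _ in lda_nouns.show_topic(t, topn=10)]
    print(f"Topic {t}: {', '.join(top_words)}")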
# In[16]:


# Let's try 4 topics
lda_nouns = models.LdaModel(corpus=corpus_nouns, num_topics=4, id2word=id2word_nouns, passes=10)
lda_nouns.print_topics()


# **I still don't see the topics becoming clear, so in attempt 3 I will try both nouns and adjectives.**

# ## Topic Modeling - Attempt #3 (Nouns and Adjectives)

# In[17]:


# Create a function to pull out nouns and adjectives from a string of text
def nouns_adj(text):
    '''Given a string of text, tokenize the text and pull out only the nouns and adjectives.'''
    is_noun_adj = lambda pos: pos[:2] == 'NN' or pos[:2] == 'JJ'
    tokenized = word_tokenize(text)
    nouns_adj = [word for (word, pos) in pos_tag(tokenized) if is_noun_adj(pos)]
    return ' '.join(nouns_adj)


# In[18]:


# Apply the nouns_adj function to the transcripts to filter on nouns and adjectives
data_nouns_adj = pd.DataFrame(data_clean.transcript.apply(nouns_adj))
data_nouns_adj


# In[19]:


# Create a new document-term matrix using only nouns and adjectives, and also remove overly common words with max_df
cv_nouns_adj = CountVectorizer(stop_words=stop_words, max_df=.8)  # ignore terms that appear in more than 80% of the documents
data_cv_nouns_adj = cv_nouns_adj.fit_transform(data_nouns_adj.transcript)
data_dtm_nouns_adj = pd.DataFrame(data_cv_nouns_adj.toarray(), columns=cv_nouns_adj.get_feature_names_out())
data_dtm_nouns_adj.index = data_nouns_adj.index


# In[20]:


# Create the gensim corpus
corpus_nouns_adj = matutils.Sparse2Corpus(scipy.sparse.csr_matrix(data_dtm_nouns_adj.transpose()))

# Create the vocabulary dictionary
id2word_nouns_adj = dict((v, k) for k, v in cv_nouns_adj.vocabulary_.items())


# In[21]:


# Let's start with 2 topics
lda_nouns_adj = models.LdaModel(corpus=corpus_nouns_adj, num_topics=2, id2word=id2word_nouns_adj, passes=10)
lda_nouns_adj.print_topics()


# In[22]:


# Let's try 3 topics
lda_nouns_adj = models.LdaModel(corpus=corpus_nouns_adj, num_topics=3, id2word=id2word_nouns_adj, passes=10)
lda_nouns_adj.print_topics()


# In[23]:


# Let's try 4 topics
lda_nouns_adj = models.LdaModel(corpus=corpus_nouns_adj, num_topics=4, id2word=id2word_nouns_adj, passes=10)
lda_nouns_adj.print_topics()


# In[24]:


# Keep it at 4 topics, but experiment with the other hyperparameters:
#   - Increase the number of passes
#   - Change alpha to a very small value, 'symmetric', or 'auto'
#   - Change eta to a very small value
#   - Set random_state to persist results across runs (by default, LDA output varies on each run)
lda_nouns_adj = models.LdaModel(corpus=corpus_nouns_adj, num_topics=4, id2word=id2word_nouns_adj,
                                passes=100, alpha='symmetric', eta=0.00001,
                                random_state=np.random.RandomState(seed=10))
for topic, topwords in lda_nouns_adj.show_topics():
    print("Topic", topic, "\n", topwords, "\n")


# **Unfortunately, tuning the hyperparameters did not yield any meaningful topics.**
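# Even when the topics themselves look muddy, it can be informative to check which topic each transcript leans toward. Here is a minimal sketch (added for illustration) using the final model: gensim's get_document_topics returns the topic distribution for each document, and the document order matches the rows of data_dtm_nouns_adj.

# In[ ]:


# For each transcript, print its single most probable topic and that topic's probability
for name, doc_topics in zip(data_dtm_nouns_adj.index, lda_nouns_adj.get_document_topics(corpus_nouns_adj)):
    top_topic, prob = max(doc_topics, key=lambda x: x[1])
    print(f"{name}: topic {top_topic} ({prob:.0%})")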