To use this notebook...

1) Click on the Loading Cell below and push Command + Return to run it (see the note after these steps about the word-vector file it needs).

2) Scroll down to the Active Cell and enter any text you want where it says "ADD YOUR TEXT HERE" (if you don't have a block of text handy, any of Paul Graham's essays is easy to copy and paste).

3) Push Command + Return and it will output the summary (takes 5-60 seconds, depending on the length of the text).
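
About that word-vector file: the Loading Cell reads glove.6B.100d.shortened.txt (presumably a trimmed copy of Stanford's 100-dimensional GloVe vectors) from the same folder as this notebook, so make sure it's there before you start. A quick, optional check:

import os
assert os.path.exists('glove.6B.100d.shortened.txt'), "Put the GloVe file next to this notebook first"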

In [2]:
### LOADING CELL ###

import re
import nltk
nltk.download('punkt')
from nltk.tokenize import sent_tokenize
from nltk.corpus import stopwords
nltk.download('stopwords')
stop_words = stopwords.words('english')

import numpy as np
import pandas as pd
import networkx as nx
from sklearn.metrics.pairwise import cosine_similarity

# Load the GloVe word vectors (100 dimensions per word) into a dictionary keyed by word
word_embeddings = {}
with open('glove.6B.100d.shortened.txt', encoding='utf-8') as f:
    for line in f:
        x = line.split()
        word = x[0]
        coefs = np.asarray(x[1:], dtype='float32')
        word_embeddings[word] = coefs

def clean_text(text):
    # Split the text into sentences, then lowercase, strip non-letters, and drop stopwords for the embedding step
    sentences = sent_tokenize(text)
    cleanish = [s.lower() for s in pd.Series(sentences).str.replace("[^a-zA-Z]", " ", regex=True)]
    clean = [" ".join([word for word in sentence.split() if word not in stop_words]) for sentence in cleanish]
    return sentences, clean

def create_sentence_vecs(text):
    # Average the GloVe vectors of each sentence's words into one 100-dimensional sentence vector
    sen_vecs = []
    for sentence in text:
        if len(sentence) != 0:
            word_vecs = [word_embeddings.get(word, np.zeros((100,))) for word in sentence.split(" ")]
            sen_sum = sum(word_vecs)
            v = sen_sum / (len(sentence.split()) + 0.001)
        else:
            v = np.zeros((100,))
        sen_vecs.append(v)
    return sen_vecs

def calculate_rankings(sentences, sen_vecs):
    # Build a cosine-similarity graph over the sentence vectors, then rank sentences with PageRank
    sim_mat = np.zeros([len(sentences), len(sentences)])
    for i in range(len(sentences)):
        for j in range(len(sentences)):
            if i != j:
                sim_mat[i][j] = cosine_similarity(sen_vecs[i].reshape(1, 100), sen_vecs[j].reshape(1, 100))[0, 0]
    nx_graph = nx.from_numpy_array(sim_mat)
    scores = nx.pagerank(nx_graph)
    ranked_sentences = sorted(((scores[i], s) for i, s in enumerate(sentences)), reverse=True)
    output = [s for (score, s) in ranked_sentences]
    return output

def create_summary(text, points=1):
    # Return the `points` highest-ranked sentences from the text
    sentences, clean = clean_text(text)
    sen_vecs = create_sentence_vecs(clean)
    output = calculate_rankings(sentences, sen_vecs)
    return output[:points]
[nltk_data] Downloading package punkt to
[nltk_data]     /Users/zachobront/nltk_data...
[nltk_data]   Package punkt is already up-to-date!
[nltk_data] Downloading package stopwords to
[nltk_data]     /Users/zachobront/nltk_data...
[nltk_data]   Package stopwords is already up-to-date!
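
If you want to poke at what the Loading Cell builds, here is an optional sanity check you can run in a fresh cell once the functions above are defined; the toy string is made up, and how meaningful the ranking is depends on which of its words appear in the shortened GloVe file:

# Inspect the pipeline's intermediate steps on a made-up three-sentence example
sample = "Cats nap on warm windowsills. Cats also nap in sunbeams. Dogs chase balls in the park."
sentences, clean = clean_text(sample)          # original sentences + lowercased, stopword-free copies
vecs = create_sentence_vecs(clean)             # one 100-dimensional vector per sentence
print(len(sentences), vecs[0].shape)           # 3 (100,)
print(calculate_rankings(sentences, vecs)[0])  # the sentence PageRank scores as most central
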
In [4]:
### ACTIVE CELL ###

text = '''
ADD YOUR TEXT HERE
'''

summary = create_summary(text)

print(f"HERE'S THE GIST: {summary[0]}")
HERE'S THE GIST: 
ADD YOUR TEXT HERE
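
If a single sentence is too terse, create_summary also takes a points argument (it defaults to 1) for how many of the top-ranked sentences to return. For example, to print the three highest-ranked sentences:

summary = create_summary(text, points=3)
print("\n".join(summary))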