#!/usr/bin/env python
# coding: utf-8

# In[1]:


# Use svg graphics, display inline
get_ipython().run_line_magic('matplotlib', 'inline')
get_ipython().run_line_magic('config', "InlineBackend.figure_format = 'svg'")

import sys

# Basic scientific computing imports
import numpy as np
import pandas as pd

# display config
pd.set_option('display.float_format', lambda x: '%.3f' % x)
np.set_printoptions(suppress=True)

# ad hoc imports
import requests
import json
import os
import praw

print(sys.version)


# # Subreddit Discovery with PRAW
#
# Reddit is the Wild West of the internet. Unlike many modern social platforms, it's structured into communities that each have their own purpose and standards. Instead of adding people to your personal network, you can explore and join these communities to get a taste of what they're about.
#
# **Thought experiment**: How would you go about finding new communities of people IRL, without Reddit? Or the internet? Like if you actually had to get out of the house and meet people. Imagine you're dropped into a new city in 1985 and you don't know a single soul.
#
# The first thing you might do is find the community you're most familiar with: somewhere you already know the customs and how to maneuver. For me, that community is probably `/r/bjj`, but we can use `/r/datascience` (I know you nerds love data science). Let's use PRAW to get the titles of the 10 hottest posts in that community.

# In[2]:


session_params = {
    'user_agent': '',
    'client_id': '',
    'client_secret': '',
}
reddit = praw.Reddit(**session_params)


# In[3]:


column_names = (
    'date', 'title', 'author', 'day_of_week', 'hour', 'score',
    'upvote_ratio', 'comment_count', 'post_id', 'url'
)

rows = []
hot_posts_gen = reddit.subreddit('DataScience').hot(limit=10)
for post in hot_posts_gen:
    # post.created is a Unix timestamp in seconds
    created = pd.to_datetime(post.created, unit='s')
    date = str(created.date())
    title = post.title
    author = post.author
    dow = created.day_name()
    hour = created.hour
    score = post.score
    upvote_ratio = post.upvote_ratio
    comment_count = post.num_comments
    post_id = post.id
    url = post.url

    row = (date, title, author, dow, hour, score, upvote_ratio,
           comment_count, post_id, url)
    rows.append(row)

df = pd.DataFrame(data=rows, columns=column_names)
df.head()


# I create a new `Reddit` object and pass in some login credentials. Check out [this page](https://praw.readthedocs.io/en/latest/getting_started/authentication.html#auth-url) to learn how to get the `client_id` and `client_secret` keys. You can generate the pair [here](https://www.reddit.com/prefs/apps) by creating a new application. A small sketch of pulling these credentials from environment variables follows below.
#
# The code above should give you a basic idea of how to use the API. From the `Reddit` object, we can access basic Reddit entities like subreddits, redditors, and comments. Here, we access /r/DataScience and use the `hot()` method to get the 10 hottest posts. Note that `hot()` returns a generator.
#
# The API's structure is really nice to work with. To get a post's title, we can just use the `post.title` property. The author is accessible through the `post.author` property. Very intuitive and ✨pythonic✨. Here, we iterate through the hottest posts, extract the relevant info, then dump it all into a dataframe so it's easy to work with.
#
# Ok, so we've found our main community. We have a home base from which we can branch out and find new communities and hobbies to take part in. Let's look around and meet some people.
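# As a side note on credentials: instead of hardcoding them in the notebook, one option is to read them from environment variables. This is just a minimal sketch; the variable names `REDDIT_CLIENT_ID`, `REDDIT_CLIENT_SECRET`, and `REDDIT_USER_AGENT` are arbitrary names I'm assuming here, not anything PRAW requires.

# In[ ]:


# Hypothetical alternative to the hardcoded session_params above:
# read the app credentials from environment variables instead.
# The variable names are arbitrary choices, not a PRAW convention.
session_params = {
    'user_agent': os.environ['REDDIT_USER_AGENT'],
    'client_id': os.environ['REDDIT_CLIENT_ID'],
    'client_secret': os.environ['REDDIT_CLIENT_SECRET'],
}
reddit = praw.Reddit(**session_params)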
# On Reddit, communities interact primarily through posting and commenting. Let's aggregate a list of potentially interesting friends. Maybe they'll be able to point us to other cool communities.

# In[4]:


cool_people = []

hot_posts = reddit.subreddit('DataScience').hot(limit=10)
for post in hot_posts:
    if post.author:
        cool_people.append(post.author)

    for comment in post.comments:
        if comment.author:
            commentor = comment.author
            cool_people.append(commentor)

cool_people = list(set(cool_people))  # remove duplicates
print(f'{len(cool_people)} potentially interesting redditors total')
# print([redditor.name for redditor in cool_people[:10]])


# Boom. Now we have a set of people who can potentially point us to new communities. Let's examine the other communities one of our friends is interacting with.

# In[38]:


redditor = np.random.choice(cool_people)
# print(redditor.name)

other_subreddits = []
for post in redditor.submissions.new(limit=10):
    other_subreddits.append(post.subreddit.display_name)

print(np.unique(other_subreddits))


# Oooooo, this redditor is into some cool stuff. While their "recommendations" are interesting, they're all biased towards this one person's own tastes. Instead of looking at a single person's interests, I want to leverage the wisdom of the crowd here. It would be interesting to ask all of my new friends for recommendations and see what the most common answers are. Let's do that and see which communities are mentioned the most.

# In[39]:


cool_communities = []
for redditor in cool_people:
    communities = set()
    for post in redditor.submissions.new(limit=20):
        communities.add(post.subreddit.display_name)

    # append each distinct community once to dedup per redditor
    for community in communities:
        if community != 'datascience':
            cool_communities.append(community)


# In[42]:


pd.Series(cool_communities).value_counts()[:20]


# Ok, now we're talking! We're starting to flesh out the picture of other potentially cool communities. Some of them, like AskReddit or Showerthoughts, are generally popular, so they probably aren't anything new. There are ways we could potentially filter those out automatically (something like TF-IDF), but it might be more trouble than it's worth. No need to be elegant here; we can simply look at the list and filter them out by hand (a quick sketch of that appears at the end of this notebook).
#
# Further down the list we start to see some really interesting communities like r/CryptoCurrency and r/datasets. So far so good. Finally, we'll make this a bit more robust by increasing the number of redditors and subreddits we look through for suggestions.

# In[5]:


cool_people = []

hot_posts = reddit.subreddit('datascience').hot(limit=30)
for post in hot_posts:
    if post.author:
        cool_people.append(post.author)

    # only look at the first 20 comments
    for i, comment in enumerate(post.comments):
        if i > 20:
            break
        if comment.author:
            commentor = comment.author
            cool_people.append(commentor)

cool_people = list(set(cool_people))  # remove duplicates

cool_communities = []
for redditor in cool_people:
    communities = set()

    # fetching a redditor's new posts sometimes throws error 403,
    # so call next() manually and catch the exception
    posts = redditor.submissions.new(limit=20)
    while True:
        try:
            post = next(posts)
        except StopIteration:
            break
        except Exception:
            # a 403 (or similar) ends this redditor's listing, so move on
            break
        communities.add(post.subreddit.display_name)

    # append each distinct community once to dedup per redditor
    for community in communities:
        cool_communities.append(community)


# In[63]:


pd.Series(cool_communities).value_counts()
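# And here's the quick hand-filtering sketch promised above. The list of subreddits to drop is hand-picked and purely illustrative; swap in whatever generally-popular subs show up at the top of your own counts.

# In[ ]:


counts = pd.Series(cool_communities).value_counts()

# Hand-picked subreddits that are popular across all of Reddit, so they
# don't tell us much about the data science crowd specifically.
# This list is illustrative; adjust it to whatever your own counts show.
too_general = ['AskReddit', 'Showerthoughts', 'datascience']

counts.drop(labels=too_general, errors='ignore')[:20]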