#!/usr/bin/env python
# coding: utf-8

# In[33]:

get_ipython().run_line_magic('load_ext', 'autoreload')
get_ipython().run_line_magic('autoreload', '2')

import banner

topics = ['Autoencoders',
          'Climate Data',
          'Robot Kinematics',
          'Rates of Neurons Firing During Movements',
          'MNIST']

banner.reset(topics)


# In[34]:

banner.next_topic()


# # Autoencoders

# In[35]:

# to interact with plot
#%matplotlib widget

import numpy as np
import matplotlib.pyplot as plt


# In[36]:

import neuralnetworksA4 as nn


# In[37]:

# plt.figure()
banner.next_topic()


# ## Climate Change
#
# Patterns in [global temperature data](https://www.cs.colostate.edu/~anderson/cs545/notebooks/global_temps.npz) provide a way to investigate climate change. This data is from the [Coupled Model Intercomparison Project Phase 5](https://pcmdi.llnl.gov/mips/cmip5/).

# In[38]:

temps = np.load('global_temps.npz')
temps.files


# In[39]:

data = temps['data']
lats = temps['lats']
lons = temps['lons']
data.shape, lats.shape, lons.shape


# The data was produced by 29 different simulations of the dynamics of the earth's atmosphere. The simulations produced global temperatures for 180 years, from 1920 through 2100.

# We are using the `cartopy` package. You should be able to install it in your anaconda distribution using
#
#     conda install conda-forge::cartopy

# In[40]:

import cartopy.util as cutil
import cartopy.crs as ccrs
import cartopy.feature as cpf
from mpl_toolkits.axes_grid1 import make_axes_locatable  # for colorbar


def draw_on_globe(data, lats, lons, cmap='coolwarm', vmin=None, vmax=None,
                  fig=None, axes=None):

    if fig is None:
        fig = plt.figure()
    if axes is None:
        axes = fig.add_subplot(1, 1, 1, projection=ccrs.Robinson())

    data_cyclic, lons_cyclic = cutil.add_cyclic_point(data, coord=lons)

    image = axes.pcolormesh(lons_cyclic, lats, data_cyclic,
                            cmap=cmap, vmin=vmin, vmax=vmax,
                            transform=ccrs.PlateCarree())
    axes.coastlines();

    divider = make_axes_locatable(axes)
    ax_cb = divider.new_horizontal(size="5%", pad=0.1, axes_class=plt.Axes)
    plt.gcf().add_axes(ax_cb)
    plt.colorbar(image, cax=ax_cb)

    plt.sca(axes)  # in case other calls, like plt.title(...), will be made
    # return image


# In[41]:

data.shape


# In[42]:

data = data - data.mean((2, 3))[:, :, np.newaxis, np.newaxis]


# In[43]:

draw_on_globe(data[0, 0, :, :], lats, lons)
plt.title('First Simulation, Year 1920');


# In[44]:

draw_on_globe(data[0, 179, :, :], lats, lons)
plt.title('First Simulation, Year 2100');


# In[45]:

temperature_differences = data[0, 179, :, :] - data[0, 0, :, :]
draw_on_globe(temperature_differences, lats, lons)
plt.title('First Simulation, Year 2100 - 1920');


# Let's swap the first two axes, so the first dimension is the years. Then we will use the different years as different samples and try to project the data into two dimensions and see how the years fall in the two-dimensional plane.

# In[46]:

data.shape


# In[47]:

data_shifted = np.moveaxis(data, 0, 1)
data.shape, data_shifted.shape


# To save time, just use data from the first five (of 29) models.

# In[48]:

X = data_shifted[:, 0:5, :, :].reshape(180, -1)
X.shape
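# The next cell trains an autoencoder: the targets passed to `train` are the same `X` as the inputs, and the middle hidden layer has only two units, so each year's temperature pattern must be squeezed through a two-dimensional bottleneck. As a rough linear point of comparison (a sketch, not part of the original notebook), a two-dimensional PCA projection of the same samples can be computed with numpy's SVD.

# In[ ]:

# Hedged sketch, not from the original notebook: linear 2-D projection of X via
# PCA, for comparison with the nonlinear autoencoder bottleneck trained below.
Xc = X - X.mean(axis=0)                # center each input dimension
U, S, Vt = np.linalg.svd(Xc, full_matrices=False)
pca_2d = U[:, :2] * S[:2]              # scores on the first two principal components
plt.plot(pca_2d[:, 0], pca_2d[:, 1], 'o');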
# In[49]:

nnet = nn.NeuralNetwork(X.shape[1], [100, 50, 2, 50, 100], X.shape[1])
nnet.train(X, X, X, X, 600, method='scg')  # notice the target!
plt.plot(nnet.get_performance_trace());


# In[50]:

len(nnet.Zs)


# In[51]:

bottle = nnet.Zs[3]
bottle.shape


# In[52]:

plt.plot(bottle[:, 0], bottle[:, 1], 'o')
r = 0.02
for year, x, y in zip(range(1920, 2100), bottle[:, 0], bottle[:, 1]):
    plt.annotate(str(year),
                 xy=(x + np.random.uniform(-r, r), y + np.random.uniform(-r, r)),
                 size=10);


# In[53]:

bottle.shape


# In[54]:

plt.scatter(bottle[:, 0], bottle[:, 1], s=np.arange(180) * 0.2, alpha=0.5)
plt.plot(bottle[:, 0], bottle[:, 1], '-', alpha=0.2);


# In[55]:

nnet = nn.NeuralNetwork(X.shape[1], [100, 50, 3, 50, 100], X.shape[1])
nnet.train(X, X, X, X, 600, method='scg')  # notice the target!
plt.plot(nnet.get_performance_trace());


# In[57]:

from mpl_toolkits.mplot3d import Axes3D

plt.figure(figsize=(10, 10))
ax = plt.subplot(projection='3d')
bottle = nnet.Zs[3]
ax.plot(bottle[:, 0], bottle[:, 1], bottle[:, 2], lw=1, alpha=0.2)
ax.scatter(bottle[:, 0], bottle[:, 1], bottle[:, 2], s=np.arange(180) * 0.2, alpha=0.5);


# In[58]:

plt.figure()
banner.next_topic()


# ## Robot Kinematics

# In[59]:

from math import pi
import time
import IPython.display as ipd  # for display and clear_output


class Robot():

    def __init__(self, link_lengths):
        self.n_links = len(link_lengths)
        self.link_lengths = np.array(link_lengths)
        self.joint_angles = np.zeros(self.n_links)
        self.points = [[10, 10] for _ in range(self.n_links + 1)]
        self.lim = sum(link_lengths)
        self.update_points()

    def update_joints(self, joint_angles):
        self.joint_angles = joint_angles
        self.update_points()

    def add_to_joints(self, joint_angle_deltas):
        self.joint_angles += joint_angle_deltas
        too_high = self.joint_angles > 2 * pi
        self.joint_angles[too_high] = self.joint_angles[too_high] - 2 * pi
        too_low = self.joint_angles < 0
        self.joint_angles[too_low] = self.joint_angles[too_low] + 2 * pi
        self.update_points()

    def update_points(self):
        for i in range(1, self.n_links + 1):
            self.points[i][0] = self.points[i - 1][0] + \
                self.link_lengths[i - 1] * np.cos(np.sum(self.joint_angles[:i]))
            self.points[i][1] = self.points[i - 1][1] + \
                self.link_lengths[i - 1] * np.sin(np.sum(self.joint_angles[:i]))
        self.end_effector = np.array(self.points[self.n_links]).T

    def get_state(self):
        return np.hstack((np.sin(self.joint_angles), np.cos(self.joint_angles)))

    def plot(self, obstacles=[]):
        for i in range(self.n_links + 1):
            if i != self.n_links:
                plt.plot([self.points[i][0], self.points[i + 1][0]],
                         [self.points[i][1], self.points[i + 1][1]], 'r-')
            plt.plot(self.points[i][0], self.points[i][1], 'k.')
        plt.axis('square')
        plt.xlim([-1, 21])
        plt.ylim([-1, 21])
        # plt.pause(1e-2)

    def illustrate(self):
        for i in range(10):
            action = np.random.uniform(0.1, 0.2, size=self.n_links)
            # action = np.random.choice([0, 0.2, 0, 0.2], size=self.n_links)
            # action = [0, 0, 0, 0.1]
            self.add_to_joints(action)
            self.plot()


# In[60]:

arm = Robot([4., 5., 4., 6.])
plt.figure()
arm.illustrate()


# In[61]:

arm = Robot([4., 5., 4., 6.])

graphics = False
points = []
for i in range(1000):
    action = np.random.uniform(0.1, 0.2, size=arm.n_links)
    arm.add_to_joints(action)
    if graphics:
        plt.clf()
        arm.plot()
        ipd.clear_output(wait=True)
        ipd.display(fig)
        time.sleep(0.2)  # 0.2 seconds
        ipd.clear_output(wait=True)
    points.append(arm.points[1] + arm.points[2])  # (x, y) at end of link 1, then end of link 2


# In[62]:

points = np.array(points)
points.shape


# In[63]:

for p in points[:10]:
    arm.update_joints(p)
    arm.plot()
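# Each collected sample holds four numbers: the (x, y) position of the end of the first link followed by the (x, y) position of the end of the second link. Because those positions are determined by just two smoothly changing joint angles, the 4-D samples lie on a lower-dimensional surface, which is what the autoencoder in the next cell tries to capture with a two-unit bottleneck. A quick optional look at the raw samples (a sketch, not part of the original notebook):

# In[ ]:

# Hedged sketch, not from the original notebook: scatter the regions swept out
# by the ends of the first two links.  Columns 0-1 are (x, y) of the end of
# link 1, columns 2-3 are (x, y) of the end of link 2.
plt.figure()
plt.plot(points[:, 0], points[:, 1], '.', alpha=0.3, label='end of link 1')
plt.plot(points[:, 2], points[:, 3], '.', alpha=0.3, label='end of link 2')
plt.axis('square')
plt.legend();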
# In[64]:

X = np.array(points)
nnet = nn.NeuralNetwork(X.shape[1], [100, 10, 2, 50], X.shape[1])
nnet.train(X, X, X, X, 5000, method='scg')
plt.plot(nnet.get_performance_trace())

Y = nnet.use(X)

plt.figure(figsize=(12, 12))
plt.plot(X, '--')
plt.plot(Y)

plt.figure(figsize=(12, 12))
bottle = nnet.Zs[3]
plt.plot(bottle[:, 0], bottle[:, 1], '-o');


# In[65]:

nnet = nn.NeuralNetwork(X.shape[1], [100, 10, 3, 10, 50], X.shape[1])
# nnet = nn.NeuralNetwork(X.shape[1], [100, 100, 50, 3, 50, 100], X.shape[1])
nnet.train(X, X, X, X, 5000, method='scg')
plt.plot(nnet.get_performance_trace())

Y = nnet.use(X)

plt.figure(figsize=(12, 12))
plt.plot(X, '--')
plt.plot(Y)

from mpl_toolkits.mplot3d import Axes3D

plt.figure(figsize=(12, 12))
ax = plt.subplot(projection='3d')
bottle = nnet.Zs[3]
plt.plot(bottle[:, 0], bottle[:, 1], bottle[:, 2], '-o', alpha=0.5);


# In[66]:

banner.next_topic()


# ## Rates of Neurons Firing During Movements

# From the [Neural Latents Benchmark](https://neurallatents.github.io/datasets) site. Download [this data file](https://www.cs.colostate.edu/~anderson/cs545/notebooks/neuron_rates.npz).

# In[67]:

file = np.load('neuron_rates.npz')
rates = file['rates']
colors = file['colors']
rates.shape, colors.shape


# In[68]:

plt.plot(rates[:500, :20]);


# In[69]:

X = rates
X.shape


# In[70]:

nnet = nn.NeuralNetwork(X.shape[1], [50, 2, 50], X.shape[1])
# nnet = nn.NeuralNetwork(X.shape[1], [100, 100, 50, 3, 50, 100], X.shape[1])
nnet.train(X, X, X, X, 8000, method='scg')


# In[71]:

plt.plot(nnet.get_performance_trace());


# In[72]:

Y = nnet.use(X)
bottle = nnet.Zs[2]
bottle.shape


# In[73]:

n_trials = 27
bottle = bottle.reshape(n_trials, -1, 2)


# In[74]:

bottle.shape


# In[75]:

for trial, color in zip(bottle, colors):
    plt.plot(trial[:, 0], trial[:, 1], color=color)
    plt.scatter(trial[0, 0], trial[0, 1], color=color)


# Since we are looking for "dynamics", meaning changes in time, let's try predicting the change in firing rates, rather than the firing rates themselves.

# In[76]:

X_trials = X.copy().reshape(n_trials, 100, -1)
X_trials.shape


# In[77]:

X_trials_diff = X_trials[:, 1:, :] - X_trials[:, :-1, :]
X_trials_diff.shape


# In[78]:

X_trials = X_trials[:, :-1, :]
X_trials.shape


# In[79]:

X_trials = X_trials.reshape(-1, 182)
X_trials_diff = X_trials_diff.reshape(-1, 182)
X_trials.shape, X_trials_diff.shape


# In[80]:

nnet = nn.NeuralNetwork(X.shape[1], [50, 2, 50], X.shape[1])
# nnet = nn.NeuralNetwork(X.shape[1], [100, 100, 50, 3, 50, 100], X.shape[1])
nnet.train(X_trials, X_trials_diff, X_trials, X_trials_diff, 10000, method='scg')
plt.plot(nnet.get_performance_trace());


# In[81]:

Y = nnet.use(X)
bottle = nnet.Zs[2]
bottle.shape


# In[82]:

n_trials = 27
bottle = bottle.reshape(n_trials, -1, 2)
bottle.shape


# In[83]:

for trial, color in zip(bottle, colors):
    plt.plot(trial[:, 0], trial[:, 1], color=color)
    plt.scatter(trial[0, 0], trial[0, 1], color=color)


# In[84]:

banner.next_topic()


# # MNIST

# In[89]:

import gzip
import pickle

with gzip.open('mnist.pkl.gz', 'rb') as f:
    train_set, valid_set, test_set = pickle.load(f, encoding='latin1')

Xtrain = train_set[0]
Ttrain = train_set[1].reshape(-1, 1)
Xval = valid_set[0]
Tval = valid_set[1].reshape(-1, 1)
Xtest = test_set[0]
Ttest = test_set[1].reshape(-1, 1)

print(Xtrain.shape, Ttrain.shape, Xval.shape, Tval.shape, Xtest.shape, Ttest.shape)


# In[96]:

nnet = nn.NeuralNetwork(Xtrain.shape[1], [50, 20, 2, 20, 50], Xtrain.shape[1])
nnet.train(Xtrain, Xtrain, Xtrain, Xtrain, 1000, method='scg')
plt.plot(nnet.get_performance_trace());
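# The next cell reads the two-unit bottleneck outputs for the training images out of `nnet.Zs[3]` and labels each projected point with its digit class. An alternative, quicker view (a sketch, not part of the original notebook) is a scatter plot colored by digit.

# In[ ]:

# Hedged sketch, not from the original notebook: color the 2-D bottleneck
# outputs by digit class instead of annotating each point with text.
bottle_all = nnet.Zs[3]
plt.figure(figsize=(8, 8))
plt.scatter(bottle_all[:, 0], bottle_all[:, 1],
            c=Ttrain[:bottle_all.shape[0], 0], s=2, cmap='tab10', alpha=0.5)
plt.colorbar();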
# In[97]:

bottle = nnet.Zs[3]
n_plot = 500
bottle = bottle[:n_plot, :]
plt.figure(figsize=(8, 8))
for digit, x, y in zip(Ttrain[:n_plot], bottle[:, 0], bottle[:, 1]):
    plt.annotate(str(digit[0]), xy=(x, y), size=8)
a, b = np.min(bottle) * 1.1, np.max(bottle) * 1.1
plt.xlim(a, b)
plt.ylim(a, b);


# In[98]:

fig = plt.figure(figsize=(8, 8))
plt.scatter(bottle[:, 0], bottle[:, 1], s=1, alpha=0.1)
scale = 0.015
for digit, x, y in zip(Xtrain[:n_plot], bottle[:, 0], bottle[:, 1]):
    ax = fig.add_axes([x - scale, y - scale, 2 * scale, 2 * scale])
    ax.imshow(-digit.reshape(28, 28), cmap='gray')
    ax.axis('off')


# In[99]:

nnet.train(Xtrain, Xtrain, Xtrain, Xtrain, 1000, method='scg')
plt.plot(nnet.get_performance_trace());


# In[103]:

bottle = nnet.Zs[3]
n_plot = 500
bottle = bottle[:n_plot, :]
plt.figure(figsize=(8, 8))
for digit, x, y in zip(Ttrain[:n_plot], bottle[:, 0], bottle[:, 1]):
    plt.annotate(str(digit[0]), xy=(x, y), size=8)
a, b = np.min(bottle) * 1.1, np.max(bottle) * 1.1
plt.xlim(a, b)
plt.ylim(a, b);


# In[104]:

fig = plt.figure(figsize=(8, 8))
plt.scatter(bottle[:, 0], bottle[:, 1], s=1, alpha=0.1)
scale = 0.015
for digit, x, y in zip(Xtrain[:n_plot], bottle[:, 0], bottle[:, 1]):
    ax = fig.add_axes([x - scale, y - scale, 2 * scale, 2 * scale])
    ax.imshow(-digit.reshape(28, 28), cmap='gray')
    ax.axis('off')


# In[ ]:
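# To see how much detail survives the two-unit bottleneck, a few test digits can be passed through the trained network and compared with their reconstructions. This is a sketch, not part of the original notebook; it relies only on `nnet.use`, used the same way as in the cells above.

# In[ ]:

# Hedged sketch, not from the original notebook: compare a few test digits with
# their reconstructions after passing through the 2-unit bottleneck.
n_show = 8
reconstructed = nnet.use(Xtest[:n_show])
plt.figure(figsize=(2 * n_show, 4))
for i in range(n_show):
    plt.subplot(2, n_show, i + 1)
    plt.imshow(-Xtest[i].reshape(28, 28), cmap='gray')
    plt.axis('off')
    plt.subplot(2, n_show, n_show + i + 1)
    plt.imshow(-reconstructed[i].reshape(28, 28), cmap='gray')
    plt.axis('off')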