#!/usr/bin/env python
# coding: utf-8

# In[1]:

import numpy as np
import numpy.random as rng
import tensorflow as tf

# In[2]:

# Synthetic data: out = 0.2 * din + 0.1 plus Gaussian noise.
# Generated as 1-D arrays so they match the [None] placeholder
# below and can be passed straight to np.polyfit.
din = rng.randn(100)
out = din * .2 + .1 + .1 * rng.normal(size=din.shape)

# In[3]:

get_ipython().run_line_magic('matplotlib', 'inline')
import matplotlib.pyplot as plt

# In[4]:

plt.scatter(din, out)

# In[52]:

x = tf.placeholder(tf.float32, [None])
w = tf.Variable([0.0])
b = tf.Variable([0.0])
y = tf.multiply(x, w) + b  # tf.mul was renamed tf.multiply in TF 1.0
y_ = tf.placeholder(tf.float32, [None])

# Sum-of-squared-errors loss, plain gradient descent.
loss = tf.reduce_sum(tf.pow(y_ - y, 2))
init = tf.global_variables_initializer()  # replaces the deprecated tf.initialize_all_variables
optim = tf.train.GradientDescentOptimizer(.005).minimize(loss)

with tf.Session() as sess:
    feed_dict = {x: din, y_: out}
    sess.run(init)
    print(sess.run(loss, feed_dict=feed_dict))
    for step in range(50):
        sess.run(optim, feed_dict=feed_dict)
        cost_curr = sess.run(loss, feed_dict=feed_dict)
        print("Iteration", step, "Loss", cost_curr)
    print(sess.run(w))
    beta = [sess.run(w), sess.run(b)]

# In[53]:

# Plot the fitted line over the scatter; use xs rather than x to avoid
# shadowing the placeholder defined above.
plt.scatter(din, out)
xs = np.linspace(din.min(), din.max(), 101)
plt.plot(xs, beta[0] * xs + beta[1])

# In[54]:

# Closed-form fit for comparison; polyfit expects 1-D inputs.
np.polyfit(din, out, 1)
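
# In[ ]:

# A minimal cross-check sketch: solve the same least-squares problem in
# closed form with np.linalg.lstsq, stacking `din` against a bias column
# of ones. The resulting (slope, intercept) pair should closely match the
# gradient-descent `beta` and the np.polyfit coefficients above.
X = np.column_stack([din, np.ones_like(din)])
coef, *_ = np.linalg.lstsq(X, out, rcond=None)
print("closed-form slope, intercept:", coef)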