#!/usr/bin/env python
# coding: utf-8

# # Concise Implementation of Linear Regression

# In[1]:

import d2l
from mxnet import autograd, gluon, np, npx
npx.set_np()

# Generate 1000 synthetic examples from a linear model with true
# weights [2, -3.4] and true bias 4.2.
true_w = np.array([2, -3.4])
true_b = 4.2
features, labels = d2l.synthetic_data(true_w, true_b, 1000)

# ## Reading Data

# In[2]:

def load_array(data_arrays, batch_size, is_train=True):
    """Construct a Gluon data iterator over in-memory arrays."""
    dataset = gluon.data.ArrayDataset(*data_arrays)
    # Shuffle the examples only during training.
    return gluon.data.DataLoader(dataset, batch_size, shuffle=is_train)

batch_size = 10
data_iter = load_array((features, labels), batch_size)

# Inspect a single minibatch.
for X, y in data_iter:
    print('X =\n%s\ny =\n%s' % (X, y))
    break

# ## Define the Model and Initialize Model Parameters

# In[3]:

from mxnet.gluon import nn
from mxnet import init

# A single fully connected layer with one output unit implements linear
# regression; Gluon infers the input dimension on the first forward pass.
net = nn.Sequential()
net.add(nn.Dense(1))
# Draw each weight from a Gaussian with standard deviation 0.01;
# bias parameters default to zero.
net.initialize(init.Normal(sigma=0.01))

# ## Define the Loss Function and Optimization Algorithm

# In[4]:

# Gluon's L2Loss computes the squared loss (y_hat - y)**2 / 2.
loss = gluon.loss.L2Loss()
# Minibatch stochastic gradient descent with a learning rate of 0.03.
trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.03})

# ## Training

# In[5]:

num_epochs = 3
for epoch in range(1, num_epochs + 1):
    for X, y in data_iter:
        with autograd.record():
            l = loss(net(X), y)   # Minibatch loss
        l.backward()              # Compute gradients
        # step(batch_size) normalizes the gradients by the batch size.
        trainer.step(batch_size)
    l = loss(net(features), labels)
    print('epoch %d, loss: %f' % (epoch, l.mean()))

# Compare the learned parameters against the generating ones.
w = net[0].weight.data()
print('Error in estimating w:', true_w.reshape(w.shape) - w)
b = net[0].bias.data()
print('Error in estimating b:', true_b - b)
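
# In[6]:

# `d2l.synthetic_data` is used above as a black box. For reference, here is a
# minimal sketch of a generator consistent with how it is called (an
# illustrative stand-in, not necessarily d2l's exact implementation): sample
# features from a standard normal, apply the true linear model, and add a
# small amount of Gaussian noise.

def synthetic_data_sketch(w, b, num_examples):
    """Generate X and y = Xw + b + noise (hypothetical stand-in for d2l)."""
    X = np.random.normal(0, 1, (num_examples, len(w)))
    y = np.dot(X, w) + b
    y += np.random.normal(0, 0.01, y.shape)  # Small observation noise
    return X, y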
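
# In[7]:

# Sanity check (illustrative): the trained network can be applied directly to
# new inputs, and its predictions should be close to the generating model
# np.dot(x, true_w) + true_b. `X_new` is a made-up input for demonstration.

X_new = np.array([[1.0, 1.0], [2.0, -1.0]])
print('predictions:\n%s' % net(X_new))
print('ground truth:\n%s' % (np.dot(X_new, true_w) + true_b))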