!pip install d2l==0.17.6

%matplotlib inline
import tensorflow as tf
from d2l import tensorflow as d2l

def init_adam_states(feature_dim):
    # One (momentum, second-moment) state pair per parameter (weight and bias)
    v_w = tf.Variable(tf.zeros((feature_dim, 1)))
    v_b = tf.Variable(tf.zeros(1))
    s_w = tf.Variable(tf.zeros((feature_dim, 1)))
    s_b = tf.Variable(tf.zeros(1))
    return ((v_w, s_w), (v_b, s_b))

def adam(params, grads, states, hyperparams):
    beta1, beta2, eps = 0.9, 0.999, 1e-6
    for p, (v, s), grad in zip(params, states, grads):
        # Exponentially weighted averages of the gradient and its square
        v[:].assign(beta1 * v + (1 - beta1) * grad)
        s[:].assign(beta2 * s + (1 - beta2) * tf.math.square(grad))
        # Correct the bias introduced by initializing v and s to zero
        v_bias_corr = v / (1 - beta1 ** hyperparams['t'])
        s_bias_corr = s / (1 - beta2 ** hyperparams['t'])
        # eps belongs inside the denominator, where it guards against
        # division by zero
        p[:].assign(p - hyperparams['lr'] * v_bias_corr
                    / (tf.math.sqrt(s_bias_corr) + eps))
    hyperparams['t'] += 1

data_iter, feature_dim = d2l.get_data_ch11(batch_size=10)
d2l.train_ch11(adam, init_adam_states(feature_dim),
               {'lr': 0.01, 't': 1}, data_iter, feature_dim);

# Concise implementation using the built-in Keras optimizer
trainer = tf.keras.optimizers.Adam
d2l.train_concise_ch11(trainer, {'learning_rate': 0.01}, data_iter)

def yogi(params, grads, states, hyperparams):
    beta1, beta2, eps = 0.9, 0.999, 1e-6
    for p, (v, s), grad in zip(params, states, grads):
        v[:].assign(beta1 * v + (1 - beta1) * grad)
        # Yogi replaces Adam's second-moment update with an additive,
        # sign-controlled one: s <- s + (1 - beta2) * sign(g^2 - s) * g^2
        s[:].assign(s + (1 - beta2) * tf.math.sign(
            tf.math.square(grad) - s) * tf.math.square(grad))
        v_bias_corr = v / (1 - beta1 ** hyperparams['t'])
        s_bias_corr = s / (1 - beta2 ** hyperparams['t'])
        p[:].assign(p - hyperparams['lr'] * v_bias_corr
                    / (tf.math.sqrt(s_bias_corr) + eps))
    hyperparams['t'] += 1

data_iter, feature_dim = d2l.get_data_ch11(batch_size=10)
d2l.train_ch11(yogi, init_adam_states(feature_dim),
               {'lr': 0.01, 't': 1}, data_iter, feature_dim);
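# A minimal numeric check, added for illustration (not part of the original
# d2l notebook): why Adam divides v by (1 - beta1 ** t). With a constant
# gradient g = 1 and v initialized to zero, the raw moving average after t
# steps equals 1 - beta1 ** t, i.e., it is biased toward zero for small t;
# dividing by (1 - beta1 ** t) recovers the true mean of 1 exactly.
beta1, v = 0.9, 0.0
for t in range(1, 4):
    v = beta1 * v + (1 - beta1) * 1.0   # raw first-moment estimate
    print(t, v, v / (1 - beta1 ** t))   # biased vs. bias-corrected value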
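# A small scalar sketch, also added for illustration (the names s_adam and
# s_yogi are ours): Adam's second-moment rule can be rewritten as
# s <- s + (1 - beta2) * (g^2 - s), so its step size depends on the deviation
# g^2 - s; with sparse (here, zero) gradients, s decays geometrically and
# past values are forgotten. Yogi's step has magnitude (1 - beta2) * g^2,
# so zero gradients leave s unchanged.
def sign(x):
    return (x > 0) - (x < 0)

beta2 = 0.999
s_adam = s_yogi = 1.0  # state built up by earlier large gradients
for _ in range(1000):  # 1000 steps with zero gradient
    g2 = 0.0
    s_adam = beta2 * s_adam + (1 - beta2) * g2               # geometric decay
    s_yogi = s_yogi + (1 - beta2) * sign(g2 - s_yogi) * g2   # step is exactly 0
print(f'After 1000 zero-gradient steps: Adam s = {s_adam:.3f}, '
      f'Yogi s = {s_yogi:.3f}')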