W_conv3 = tf.Variable(tf.truncated_normal([5, 5, 64, 128], stddev=0.1))
b_conv3 = tf.Variable(tf.constant(0.1, shape=[128]))  # need 128 biases for 128 outputs
convolve3 = tf.nn.conv2d(conv2, W_conv3, strides=[1, 1, 1, 1], padding='SAME') + b_conv3
h_conv3 = tf.nn.relu(convolve3)
conv3 = tf.nn.max_pool(h_conv3, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # max_pool_2x2
conv3
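The cell above consumes conv2, the output of an earlier convolution/pooling stage defined before this excerpt. A minimal sketch of what those first two stages might look like, assuming a 28x28 single-channel input and a 32-filter first stage (both assumptions; only the 64 input channels of W_conv3 are implied by the code above):

# Hypothetical earlier stages implied by the 64 input channels of W_conv3.
# The 28x28x1 input and the 32-filter first stage are assumptions.
x = tf.placeholder(tf.float32, shape=[None, 28, 28, 1])

W_conv1 = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1))
b_conv1 = tf.Variable(tf.constant(0.1, shape=[32]))
h_conv1 = tf.nn.relu(tf.nn.conv2d(x, W_conv1, strides=[1, 1, 1, 1], padding='SAME') + b_conv1)
conv1 = tf.nn.max_pool(h_conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

W_conv2 = tf.Variable(tf.truncated_normal([5, 5, 32, 64], stddev=0.1))
b_conv2 = tf.Variable(tf.constant(0.1, shape=[64]))
h_conv2 = tf.nn.relu(tf.nn.conv2d(conv1, W_conv2, strides=[1, 1, 1, 1], padding='SAME') + b_conv2)
conv2 = tf.nn.max_pool(h_conv2, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')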
W_conv4 = tf.Variable(tf.truncated_normal([5, 5, 128, 256], stddev=0.1))
b_conv4 = tf.Variable(tf.constant(0.1, shape=[256]))  # need 256 biases for 256 outputs
convolve4 = tf.nn.conv2d(conv3, W_conv4, strides=[1, 1, 1, 1], padding='SAME') + b_conv4
h_conv4 = tf.nn.relu(convolve4)
conv4 = tf.nn.max_pool(h_conv4, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')  # max_pool_2x2
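With padding='SAME' and 2x2 pooling at stride 2, each stage halves the spatial dimensions, rounding up. A quick sanity check of the sizes this implies, assuming the 28x28 input from the sketch above:

# Spatial size after each SAME-padded 2x2 max-pool (assuming a 28x28 input):
# 28 -> 14 -> 7 -> 4 -> 2, so conv4 has shape [batch, 2, 2, 256].
import math

size = 28  # assumed input height/width
for _ in range(4):  # four conv/pool stages
    size = math.ceil(size / 2)
print(size)  # 2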
nodes2 = 256
W_fc2 = tf.Variable(tf.truncated_normal([layer_drop1.get_shape().as_list()[1], nodes2], stddev=0.1))
b_fc2 = tf.Variable(tf.constant(0.1, shape=[nodes2]))
h_fcl2 = tf.matmul(layer_drop1, W_fc2) + b_fc2
fc_layer2 = tf.nn.relu(h_fcl2)  # second fully connected layer with ReLU activation
fc_layer2
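The cell above consumes layer_drop1, the dropout output of a first fully connected layer that is also not in this excerpt. A plausible sketch, assuming the flattened conv4 feeds a 512-unit first FC layer (the 512 unit count and the 2x2x256 flattened size are assumptions; only the flatten-then-matmul pattern is implied by the get_shape() call above):

# Hypothetical first FC stage implied by layer_drop1; the 512 unit count is assumed.
flat = tf.reshape(conv4, [-1, 2 * 2 * 256])  # 2x2 spatial size assumes a 28x28 input

nodes1 = 512
W_fc1 = tf.Variable(tf.truncated_normal([2 * 2 * 256, nodes1], stddev=0.1))
b_fc1 = tf.Variable(tf.constant(0.1, shape=[nodes1]))
fc_layer1 = tf.nn.relu(tf.matmul(flat, W_fc1) + b_fc1)

keep_prob = tf.placeholder(tf.float32)
layer_drop1 = tf.nn.dropout(fc_layer1, keep_prob)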
layer_drop2 = tf.nn.dropout(fc_layer2, keep_prob)  # apply dropout to the second FC layer
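A typical next step on top of layer_drop2 is a final affine layer with softmax; a sketch assuming 10 output classes (the class count is an assumption, not from this excerpt):

# Hypothetical softmax readout; the 10-class output size is an assumption.
W_out = tf.Variable(tf.truncated_normal([nodes2, 10], stddev=0.1))
b_out = tf.Variable(tf.constant(0.1, shape=[10]))
y_pred = tf.nn.softmax(tf.matmul(layer_drop2, W_out) + b_out)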