Generator

In this section, we are going to implement the first core part of the GAN network. The architecture and implementation of this part will follow the original DCGAN paper:

def generator(latent_z, output_image_dim, reuse_vars=False, leaky_alpha=0.2, is_training=True, size_mult=128):
    """Build the DCGAN generator network.

    Projects a latent noise vector through a dense layer, reshapes it into a
    4x4 spatial block, and upsamples it three times with strided transposed
    convolutions, producing an image tensor squashed to [-1, 1] by tanh.

    Args:
        latent_z: 2-D tensor of latent noise vectors, shape (batch, z_dim).
        output_image_dim: number of channels in the generated image.
        reuse_vars: whether to reuse variables inside the 'generator' scope.
        leaky_alpha: negative slope for the leaky-ReLU activations.
        is_training: forwarded to batch normalization so it uses batch
            statistics (and updates moving averages) during training.
        size_mult: base channel-width multiplier for the conv layers.

    Returns:
        4-D tensor of generated images with values in [-1, 1].
    """
    with tf.variable_scope('generator', reuse=reuse_vars):

        def leaky_relu(x):
            # max(alpha * x, x) keeps a small gradient for negative inputs.
            return tf.maximum(leaky_alpha * x, x)

        # Dense projection of the latent vector, reshaped to a 4x4 block.
        projected = tf.layers.dense(latent_z, 4 * 4 * size_mult * 4)
        block = tf.reshape(projected, (-1, 4, 4, size_mult * 4))
        block = leaky_relu(tf.layers.batch_normalization(block, training=is_training))

        # First upsampling stage (stride-2 transposed convolution).
        block = tf.layers.conv2d_transpose(block, size_mult * 2, 5, strides=2, padding='same')
        block = leaky_relu(tf.layers.batch_normalization(block, training=is_training))

        # Second upsampling stage.
        block = tf.layers.conv2d_transpose(block, size_mult, 5, strides=2, padding='same')
        block = leaky_relu(tf.layers.batch_normalization(block, training=is_training))

        # Output layer: no batch norm here (per DCGAN practice); tanh maps
        # the logits into the [-1, 1] pixel range.
        logits_layer = tf.layers.conv2d_transpose(block, output_image_dim, 5, strides=2, padding='same')
        return tf.tanh(logits_layer)
..................Content has been hidden....................

You can't read all pages of the ebook; please log in to view all pages.
Reset