2-GAN Mnist.ipynb - Colab
import keras
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt

from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import Dense, LeakyReLU, BatchNormalization, Reshape, Flatten, Input
from keras.optimizers import Adam
#(X_train, y_train), (X_test, y_test) = mnist.load_data()
#Large images take too much time and resources.
img_rows = 28
img_cols = 28
channels = 1
img_shape = (img_rows, img_cols, channels)
def build_generator():

    noise_shape = (100,) #1D array of size 100 (latent noise vector)

    model = Sequential()

    model.add(Dense(256, input_shape=noise_shape))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.2))
    model.add(BatchNormalization(momentum=0.8))
    model.add(Dense(np.prod(img_shape), activation='tanh'))
    model.add(Reshape(img_shape))

    model.summary()

    noise = Input(shape=noise_shape)
    img = model(noise) #Generated image

    return Model(noise, img)
#Given an input image, the Discriminator outputs the likelihood of the image being real.
#Binary classification - true or false (we're calling it validity)
def build_discriminator():

    model = Sequential()

    model.add(Flatten(input_shape=img_shape))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(256))
    model.add(LeakyReLU(alpha=0.2))
    model.add(Dense(1, activation='sigmoid'))

    model.summary()
    img = Input(shape=img_shape)
    validity = model(img) #Probability that the image is real

    return Model(img, validity)
#Now that we have constructed our two models it’s time to pit them against each other.
#We do this by defining a training function, loading the data set, re-scaling our training
#images and setting the ground truths.
def train(epochs, batch_size=128, save_interval=500):

    #Load the dataset and rescale from [0, 255] to [-1, 1] to match the generator's tanh output.
    (X_train, _), (_, _) = mnist.load_data()
    X_train = (X_train.astype(np.float32) - 127.5) / 127.5

    #Add channels dimension. The input to our gen and discr. has a shape 28x28x1.
    X_train = np.expand_dims(X_train, axis=3)

    half_batch = int(batch_size / 2)
    #We then loop through a number of epochs to train our Discriminator by first selecting
    #a random batch of images from our true dataset, generating a set of images from our
    #Generator, feeding both sets of images into our Discriminator, and finally setting the
    #loss parameters for both the real and fake images, as well as the combined loss.
    for epoch in range(epochs):

        # ---------------------
        #  Train Discriminator
        # ---------------------

        noise = np.random.normal(0, 1, (half_batch, 100)) #half_batch noise vectors, each of size 100
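        #A minimal sketch of the discriminator update described above, assuming the
        #usual half-batch scheme with train_on_batch (real images labeled 1, fakes 0):
        idx = np.random.randint(0, X_train.shape[0], half_batch)
        imgs = X_train[idx]                  #Random half batch of real images
        gen_imgs = generator.predict(noise)  #Half batch of generated (fake) images

        d_loss_real = discriminator.train_on_batch(imgs, np.ones((half_batch, 1)))
        d_loss_fake = discriminator.train_on_batch(gen_imgs, np.zeros((half_batch, 1)))
        d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)  #Combined loss and accuracy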
        #And within the same loop we train our Generator, by setting the input noise and
        #ultimately training the Generator to have the Discriminator label its samples as valid
        #by specifying the gradient loss.

        # ---------------------
        #  Train Generator
        # ---------------------
        #Create noise vectors as input for the generator.
        #Create as many noise vectors as defined by the batch size.
        #Based on normal distribution. Output will be of size (batch_size, 100).
        noise = np.random.normal(0, 1, (batch_size, 100))
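        #A minimal sketch of the generator update described above, assuming the combined
        #model defined further below: the generator is trained so that the (frozen)
        #discriminator labels its samples as valid (1).
        valid_y = np.ones((batch_size, 1))
        g_loss = combined.train_on_batch(noise, valid_y)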
        #Additionally, in order for us to keep track of our training process, we print the
        #progress and save the sample image output depending on the epoch interval specified.

        # Plot the progress
        print("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
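        #A sketch of the interval check the comments describe: save sample images
        #every save_interval epochs.
        if epoch % save_interval == 0:
            save_imgs(epoch)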
def save_imgs(epoch):
    r, c = 5, 5
    noise = np.random.normal(0, 1, (r * c, 100))
    gen_imgs = generator.predict(noise)

    # Rescale images 0 - 1
    gen_imgs = 0.5 * gen_imgs + 0.5
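    #A minimal sketch of the r x c image grid this function is meant to save;
    #the filename mnist_<epoch>.png is a hypothetical choice, not the notebook's.
    fig, axs = plt.subplots(r, c)
    for i in range(r):
        for j in range(c):
            axs[i, j].imshow(gen_imgs[i * c + j, :, :, 0], cmap='gray')
            axs[i, j].axis('off')
    fig.savefig("mnist_%d.png" % epoch)
    plt.close()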
#Let us also define our optimizer for easy use later on.
#That way if you change your mind, you can change it easily here.
optimizer = Adam(0.0002, 0.5) #Learning rate and beta_1 (the momentum-like term).

#Since we are only generating (faking) images, let us not track any metrics.
generator = build_generator()
generator.compile(loss='binary_crossentropy', optimizer=optimizer)
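#We also need the Discriminator built and compiled; a minimal sketch, tracking
#accuracy since the progress print above reports d_loss[1] as a percentage:
discriminator = build_discriminator()
discriminator.compile(loss='binary_crossentropy', optimizer=optimizer, metrics=['accuracy'])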
#For the combined model we will only train the generator.
#This doesn't affect the above discriminator training.
discriminator.trainable = False
#This specifies that our Discriminator will take the images generated by our Generator
#and output a parameter called valid, which will indicate whether the input is real or not.
z = Input(shape=(100,))     #Noise input to the combined model
img = generator(z)          #Image generated from the noise
valid = discriminator(img)  #Validity check on the generated image
#Here we combine the models and also set our loss function and optimizer.
#Again, we are only training the generator here.
#The ultimate goal is for the Generator to fool the Discriminator.
# The combined model (stacked generator and discriminator) takes
# noise as input => generates images => determines validity
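#A minimal sketch of the stacked model just described: noise in, validity out,
#with only the generator's weights trainable.
combined = Model(z, valid)
combined.compile(loss='binary_crossentropy', optimizer=optimizer)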
#Epochs dictate the number of backward and forward propagations, the batch_size
#indicates the number of training samples per backward/forward propagation, and the
#save_interval specifies after how many epochs we call our save_imgs function.
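#An example call to kick off training; the epoch count, batch size, interval, and
#filename below are placeholder values, not the notebook's originals.
train(epochs=10000, batch_size=32, save_interval=1000)
generator.save('generator_model.h5')  #Saving to HDF5 triggers the warning below.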
WARNING:absl:You are saving your model as an HDF5 file via `model.save()` or `keras.saving.save_model(model)`. This file format is considered legacy. We recommend using instead the native Keras format, e.g. `model.save('my_model.keras')` or `keras.saving.save_model(model, 'my_model.keras')`.