Tensorflow Cheat Sheet For Deep Learning Model Building
DATA SCIENCE BRAIN (@datasciencebrain)
Our website: deepakjosecodes.com
01 SIMPLE NEURAL NETWORK (DENSE)

from tensorflow.keras import layers, models

model = models.Sequential()
model.add(layers.Flatten(input_shape=(input_size,)))  # Adjust input_size based on your data

# Add hidden layers
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dropout(0.2))  # Optional: add dropout for regularization

# Add output layer
model.add(layers.Dense(output_size, activation='softmax'))  # Adjust output_size based on your problem

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',  # Use 'categorical_crossentropy' for one-hot labels
              metrics=['accuracy'])
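The compiled model can then be trained and evaluated; a minimal sketch, assuming X_train/y_train and X_test/y_test hold features and integer class labels (to match the sparse loss) and epochs is defined:

# Train with a held-out validation slice, then evaluate on the test set
model.fit(X_train, y_train, epochs=epochs, batch_size=32,
          validation_split=0.1)
test_loss, test_acc = model.evaluate(X_test, y_test)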
02 CONVOLUTIONAL NEURAL NETWORK (CNN)

model = models.Sequential()
model.add(layers.Conv2D(32, (3, 3), activation='relu',
                        input_shape=(img_height, img_width, channels)))
model.add(layers.MaxPooling2D((2, 2)))

# Add more convolutional and pooling layers as needed (see the sketch after this block)

model.add(layers.Flatten())
model.add(layers.Dense(128, activation='relu'))
model.add(layers.Dense(output_size, activation='softmax'))

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
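One way the "add more convolutional and pooling layers" step might look, inserted before the Flatten layer (the filter count of 64 is illustrative, not prescribed by the cheat sheet):

# A second convolution/pooling block to deepen the feature extractor
model.add(layers.Conv2D(64, (3, 3), activation='relu'))
model.add(layers.MaxPooling2D((2, 2)))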
03 RECURRENT NEURAL NETWORK (RNN)

model = models.Sequential()
model.add(layers.SimpleRNN(128, activation='relu',
                           input_shape=(timesteps, features)))

# Add more recurrent layers or use LSTM/GRU layers (see the stacking sketch below)

model.add(layers.Dense(output_size, activation='softmax'))

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
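Stacking recurrent layers requires return_sequences=True on every layer except the last, so each layer passes the full sequence onward; a sketch with illustrative layer sizes:

# Two stacked recurrent layers; only the last one returns a single vector
model = models.Sequential()
model.add(layers.SimpleRNN(128, return_sequences=True,
                           input_shape=(timesteps, features)))
model.add(layers.SimpleRNN(64))
model.add(layers.Dense(output_size, activation='softmax'))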
04 LSTM

model = models.Sequential()
model.add(layers.LSTM(128, activation='relu',
                      input_shape=(timesteps, features)))

# Add more LSTM layers if needed

model.add(layers.Dense(output_size, activation='softmax'))

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
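These recurrent layers expect input shaped (samples, timesteps, features); a sketch of windowing a 1-D NumPy series into that shape (make_windows is a hypothetical helper, not part of the cheat sheet):

import numpy as np

def make_windows(series, timesteps):
    # Slide a window of length `timesteps` over the series
    X = np.array([series[i:i + timesteps]
                  for i in range(len(series) - timesteps)])
    return X.reshape(-1, timesteps, 1)  # one feature per step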
05 GRU

model = models.Sequential()
model.add(layers.GRU(128, activation='relu',
                     input_shape=(timesteps, features)))

# Add more GRU layers if needed

model.add(layers.Dense(output_size, activation='softmax'))

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
06 TRANSFER LEARNING (VGG16)

from tensorflow.keras.applications import VGG16

# Load pre-trained VGG16 model without the top layer
base_model = VGG16(weights='imagenet', include_top=False,
                   input_shape=(img_height, img_width, channels))

# Freeze convolutional layers
for layer in base_model.layers:
    layer.trainable = False

model = models.Sequential()
model.add(base_model)

# Add custom classification layers
model.add(layers.Flatten())
model.add(layers.Dense(256, activation='relu'))
model.add(layers.Dropout(0.5))
model.add(layers.Dense(output_size, activation='softmax'))

model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])
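A caveat when reusing the ImageNet weights: inputs should be preprocessed with VGG16's matching preprocess_input function; a sketch, assuming X_train holds raw RGB pixel arrays:

from tensorflow.keras.applications.vgg16 import preprocess_input

X_train = preprocess_input(X_train)  # channel scaling/centering that VGG16 expects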
07 BATCH NORMALIZATION
model.add(layers.BatchNormalization())  # Normalizes the previous layer's activations
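In context, the call usually sits directly after a Dense or Conv layer; a sketch with illustrative sizes:

model.add(layers.Dense(128, activation='relu'))
model.add(layers.BatchNormalization())
model.add(layers.Dense(64, activation='relu'))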
08 DATA AUGMENTATION
from tensorflow.keras.preprocessing.image import ImageDataGenerator

datagen = ImageDataGenerator(
    rotation_range=20,
    width_shift_range=0.2,
    height_shift_range=0.2,
    horizontal_flip=True,
    shear_range=0.2
)

# Feed augmented batches to the model during training
model.fit(datagen.flow(X_train, y_train, batch_size=32),
          epochs=epochs)
09 EARLY STOPPING
from tensorflow.keras.callbacks import EarlyStopping

early_stopping = EarlyStopping(monitor='val_loss', patience=3,
                               restore_best_weights=True)

model.fit(X_train, y_train, epochs=epochs,
          validation_data=(X_val, y_val),
          callbacks=[early_stopping])
10 LEARNING RATE SCHEDULER

from tensorflow.keras.callbacks import LearningRateScheduler

def scheduler(epoch, lr):
    if epoch % 10 == 0 and epoch != 0:
        return lr * 0.9
    else:
        return lr

lr_scheduler = LearningRateScheduler(scheduler)
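Under this schedule the learning rate is multiplied by 0.9 every 10 epochs; the callback is then passed to fit, as with early stopping above:

model.fit(X_train, y_train, epochs=epochs,
          callbacks=[lr_scheduler])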
11 HYPERPARAMETER TUNING (GRID SEARCH)

from sklearn.model_selection import GridSearchCV
from tensorflow.keras.wrappers.scikit_learn import KerasClassifier
# Note: this wrapper was removed in recent TensorFlow releases; the
# scikeras package provides an equivalent KerasClassifier

# Define your model creation function
def create_model(optimizer='adam', hidden_units=128, dropout_rate=0.2):
    model = models.Sequential()
    model.add(layers.Flatten(input_shape=(input_size,)))

    # Add hidden layers
    model.add(layers.Dense(hidden_units, activation='relu'))
    model.add(layers.Dropout(dropout_rate))

    # Add output layer
    model.add(layers.Dense(output_size, activation='softmax'))

    model.compile(optimizer=optimizer,
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])
    return model

# Create a KerasClassifier with your model creation function
model = KerasClassifier(build_fn=create_model, epochs=10, batch_size=32, verbose=0)

# Define the hyperparameters to search
param_grid = {
    'optimizer': ['adam', 'sgd', 'rmsprop'],
    'hidden_units': [64, 128, 256],
}

# Use GridSearchCV for hyperparameter search
grid = GridSearchCV(estimator=model, param_grid=param_grid, cv=3)
grid_result = grid.fit(X_train, y_train)
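Once the search finishes, the best configuration can be read off the fitted object; a short sketch:

print(grid_result.best_score_, grid_result.best_params_)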
Was it helpful? Check out our YouTube channel for Machine Learning projects
and other amazing Data Science related content: youtube.com/@dsbrain