Implementation of CNN
# import the necessary libraries
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from itertools import product
# set plotting defaults
plt.rc('figure', autolayout=True)
plt.rc('image', cmap='magma')
# define a 3x3 edge-detection (Laplacian-style) kernel
kernel = tf.constant([[-1, -1, -1],
                      [-1,  8, -1],
                      [-1, -1, -1],
                     ])
# load the image
image = tf.io.read_file('Ganesh.jpg')
image = tf.io.decode_jpeg(image, channels=1)
image = tf.image.resize(image, size=[300, 300])
# plot the image
img = tf.squeeze(image).numpy()
plt.figure(figsize=(5, 5))
plt.imshow(img, cmap='gray')
plt.axis('off')
plt.title('Original Gray Scale image')
plt.show()
# Reformat
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.expand_dims(image, axis=0)
kernel = tf.reshape(kernel, [*kernel.shape, 1, 1])
kernel = tf.cast(kernel, dtype=tf.float32)
# convolution layer
conv_fn = tf.nn.conv2d
image_filter = conv_fn(
    input=image,
    filters=kernel,
    strides=1,  # or (1, 1)
    padding='SAME',
)

plt.figure(figsize=(15, 5))
# Plot the convolved image
plt.subplot(1, 3, 1)
plt.imshow(tf.squeeze(image_filter))
plt.axis('off')
plt.title('Convolution')
# activation layer
relu_fn = tf.nn.relu
# Image detection
image_detect = relu_fn(image_filter)
plt.subplot(1, 3, 2)
# Reformat for plotting
plt.imshow(tf.squeeze(image_detect))
plt.axis('off')
plt.title('Activation')
# Pooling layer
pool = tf.nn.pool
image_condense = pool(input=image_detect,
                      window_shape=(2, 2),
                      pooling_type='MAX',
                      strides=(2, 2),
                      padding='SAME',
                      )
plt.subplot(1, 3, 3)
plt.imshow(tf.squeeze(image_condense))
plt.axis('off')
plt.title('Pooling')
plt.show()
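The three ops used above (tf.nn.conv2d, tf.nn.relu, tf.nn.pool) are exactly what Keras wraps into reusable layers. For comparison, here is a minimal sketch of the same convolution, activation and pooling pipeline expressed with Keras layers; the single-filter Conv2D mirrors the hand-built 3x3 kernel, except that a trained layer learns its weights rather than using a fixed edge-detection kernel:
from tensorflow.keras import layers, models

# A minimal sketch: the same convolution -> ReLU -> max-pool pipeline as layers.
# The single filter and 3x3 kernel size mirror the manual example; a real model
# would use many filters and learn their weights during training.
seq_model = models.Sequential([
    layers.Input(shape=(300, 300, 1)),
    layers.Conv2D(filters=1, kernel_size=3, padding='same', activation='relu'),
    layers.MaxPool2D(pool_size=2, strides=2, padding='same'),
])

# Applying the (untrained) model to the batched image from above produces a
# feature map with the same shape as the manual pipeline's output.
feature_map = seq_model(image)
print(feature_map.shape)  # (1, 150, 150, 1)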
Implementing a Text Generator Using Recurrent Neural Networks (RNNs)
In this section, we create a character-based text generator using a Recurrent Neural Network (RNN) in TensorFlow and Keras. We'll implement an RNN that learns patterns from a text sequence to generate new text character by character.
Step 1: Import Necessary Libraries
We start by importing essential libraries for data handling and
building the neural network.
Python
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense
Step 2: Define the Input Text and Prepare Character Set
We define the input text and identify unique characters in the text,
which we’ll encode for our model.
Python
text = "This is GeeksforGeeks a software training institute"
chars = sorted(list(set(text)))
char_to_index = {char: i for i, char in enumerate(chars)}
index_to_char = {i: char for i, char in enumerate(chars)}
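For this particular text, the space character sorts first, then the uppercase letters, then the lowercase ones, giving 17 unique characters in total:
Python
print(chars)
# [' ', 'G', 'T', 'a', 'e', 'f', 'g', 'h', 'i', 'k', 'n', 'o', 'r', 's', 't', 'u', 'w']
print(len(chars))           # 17
print(char_to_index['T'])   # 2
print(index_to_char[13])    # 's'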
Step 3: Create Sequences and Labels
To train the RNN, we need sequences of fixed length (seq_length) and the character following each sequence as the label.
Python
seq_length = 3
sequences = []
labels = []

for i in range(len(text) - seq_length):
    seq = text[i:i + seq_length]
    label = text[i + seq_length]
    sequences.append([char_to_index[char] for char in seq])
    labels.append(char_to_index[label])

X = np.array(sequences)
y = np.array(labels)
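To make the windowing concrete: the first pair takes characters 0-2 as the input sequence and character 3 as the label, the second pair slides the window one character to the right, and so on. With the index mapping above:
Python
print(text[0:3], '->', text[3])   # Thi -> s
print(sequences[0], labels[0])    # [2, 7, 8] 13
print(sequences[1], labels[1])    # [7, 8, 13] 0  ("his" -> ' ')
print(X.shape, y.shape)           # (48, 3) (48,) for the 51-character text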
Step 4: Convert Sequences and Labels to One-Hot Encoding
For training, we convert X and y into one-hot encoded tensors.
Python
X_one_hot = tf.one_hot(X, len(chars))
y_one_hot = tf.one_hot(y, len(chars))
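One-hot encoding turns every integer index into a 17-dimensional vector, so both tensors gain a trailing dimension of len(chars):
Python
print(X_one_hot.shape)  # (48, 3, 17): 48 windows x 3 timesteps x 17-way one-hot
print(y_one_hot.shape)  # (48, 17): one 17-way one-hot target per window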
Step 5: Build the RNN Model
We create a simple RNN model with a hidden layer of 50 units and a
Dense output layer with softmax activation.
Python
model = Sequential()
model.add(SimpleRNN(50, input_shape=(seq_length, len(chars)),
                    activation='relu'))
model.add(Dense(len(chars), activation='softmax'))
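As a sanity check, model.summary() should report roughly 4.3k parameters with the 17-character vocabulary worked out above: the SimpleRNN holds an input kernel (17 x 50), a recurrent kernel (50 x 50) and a bias (50), and the Dense layer adds 50 x 17 weights plus 17 biases:
Python
model.summary()
# SimpleRNN: 17*50 + 50*50 + 50 = 3,400 parameters
# Dense:     50*17 + 17        =   867 parameters
# Total:                          4,267 parameters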
Step 6: Compile and Train the Model
We compile the model using the categorical_crossentropy loss and
train it for 100 epochs.
Python
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_one_hot, y_one_hot, epochs=100)
Output:
Epoch 1/100
2/2 ━━━━━━━━━━━━━━━━━━━━ 4s 23ms/step - accuracy: 0.0243 - loss: 2.9043
Epoch 2/100
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 14ms/step - accuracy: 0.0139 - loss: 2.8720
Epoch 3/100
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 10ms/step - accuracy: 0.0243 - loss: 2.8454
...
Epoch 99/100
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - accuracy: 0.8889 - loss: 0.5060
Epoch 100/100
2/2 ━━━━━━━━━━━━━━━━━━━━ 0s 9ms/step - accuracy: 0.9236 - loss: 0.4934
Step 7: Generate New Text Using the Trained Model
After training, we use a starting sequence to generate new text
character-by-character.
Python
start_seq = "This is G"
generated_text = start_seq

for i in range(50):
    x = np.array([[char_to_index[char] for char in generated_text[-seq_length:]]])
    x_one_hot = tf.one_hot(x, len(chars))
    prediction = model.predict(x_one_hot)
    next_index = np.argmax(prediction)
    next_char = index_to_char[next_index]
    generated_text += next_char

print("Generated Text:")
print(generated_text)
Output:
Generated Text: This is Geeks a software training instituteais is is is is
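The trailing "is is is" shows a known weakness of greedy decoding: np.argmax always picks the single most likely next character, so the model can lock into a short loop. A common remedy, not part of the original tutorial, is to sample from the softmax distribution with a temperature; a minimal sketch, reusing the model and mappings defined above:
Python
def sample_next_char(seed, temperature=0.8):
    # Predict a distribution over the next character, then sample from it
    # instead of taking the argmax; lower temperatures are more conservative.
    x = np.array([[char_to_index[c] for c in seed[-seq_length:]]])
    probs = model.predict(tf.one_hot(x, len(chars)), verbose=0)[0].astype("float64")
    logits = np.log(probs + 1e-9) / temperature
    probs = np.exp(logits) / np.sum(np.exp(logits))
    return index_to_char[np.random.choice(len(chars), p=probs)]

generated = "This is G"
for _ in range(50):
    generated += sample_next_char(generated)
print(generated)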
Complete Code
# Step 1: Import necessary libraries
import numpy as np
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import SimpleRNN, Dense
# Step 2: Define text and character set
text = "This is GeeksforGeeks a software training institute"
chars = sorted(list(set(text)))
char_to_index = {char: i for i, char in enumerate(chars)}
index_to_char = {i: char for i, char in enumerate(chars)}
# Step 3: Create sequences and labels
seq_length = 3
sequences = []
labels = []
for i in range(len(text) - seq_length):
    seq = text[i:i + seq_length]
    label = text[i + seq_length]
    sequences.append([char_to_index[char] for char in seq])
    labels.append(char_to_index[label])
X = np.array(sequences)
y = np.array(labels)
# Step 4: One-hot encode sequences and labels
X_one_hot = tf.one_hot(X, len(chars))
y_one_hot = tf.one_hot(y, len(chars))
# Step 5: Build the model
model = Sequential()
model.add(SimpleRNN(50, input_shape=(seq_length, len(chars)),
                    activation='relu'))
model.add(Dense(len(chars), activation='softmax'))
# Step 6: Compile and train the model
model.compile(optimizer='adam', loss='categorical_crossentropy',
              metrics=['accuracy'])
model.fit(X_one_hot, y_one_hot, epochs=100)
# Step 7: Generate text
start_seq = "This is G"
generated_text = start_seq
for i in range(50):
    x = np.array([[char_to_index[char] for char in generated_text[-seq_length:]]])
    x_one_hot = tf.one_hot(x, len(chars))
    prediction = model.predict(x_one_hot)
    next_index = np.argmax(prediction)
    next_char = index_to_char[next_index]
    generated_text += next_char
print("Generated Text:")
print(generated_text)