DL Lab - Colab

The document provides examples of using Keras with TensorFlow and PyTorch to train neural networks on the MNIST and CIFAR-10 datasets, along with LSTM/GRU models for sentiment analysis on the IMDB dataset. It includes code for building, training, and evaluating models in both libraries, illustrating the differences in implementation, and reports the resulting test accuracies after training on each dataset.


# Installing the Keras, TensorFlow and PyTorch libraries and making use of them

import tensorflow as tf
from tensorflow import keras
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
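
In Colab these libraries come preinstalled; for other environments, a minimal setup cell (run before the imports) might look like the following — a sketch with unpinned versions, not a step from the original notebook:

# Hypothetical install cell for non-Colab environments (unpinned versions)
!pip install tensorflow torch torchvision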

# Function to use Keras and TensorFlow
def keras_tensorflow_example():
    # Load the MNIST dataset
    (x_train, y_train), (x_test, y_test) = keras.datasets.mnist.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    # Build the model using Keras Sequential API
    model = keras.models.Sequential([
        keras.layers.Flatten(input_shape=(28, 28)),
        keras.layers.Dense(128, activation='relu'),
        keras.layers.Dropout(0.2),
        keras.layers.Dense(10, activation='softmax')
    ])

    # Compile the model
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # Train the model
    model.fit(x_train, y_train, epochs=5)

    # Evaluate the model
    test_loss, test_acc = model.evaluate(x_test, y_test)
    print('\nKeras & TensorFlow - Test accuracy:', test_acc)

# Function to use PyTorch
def pytorch_example():
    # Define a transform to normalize the data (maps pixels from [0, 1] to [-1, 1])
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.5,), (0.5,))])

    # Download and load the training data
    trainset = datasets.MNIST('mnist_data/', download=True, train=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)

    # Download and load the test data (no need to shuffle for evaluation)
    testset = datasets.MNIST('mnist_data/', download=True, train=False, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False)

    # Define a simple feedforward neural network
    class SimpleNN(nn.Module):
        def __init__(self):
            super(SimpleNN, self).__init__()
            self.fc1 = nn.Linear(28 * 28, 128)
            self.fc2 = nn.Linear(128, 10)

        def forward(self, x):
            x = x.view(-1, 28 * 28)  # flatten each 28x28 image into a vector
            x = torch.relu(self.fc1(x))
            x = self.fc2(x)
            return x

    # Initialize the network, loss function and optimizer
    model = SimpleNN()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Train the network
    for epoch in range(5):
        running_loss = 0
        for images, labels in trainloader:
            optimizer.zero_grad()
            output = model(images)
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        print(f"Epoch {epoch + 1} - Training loss: {running_loss / len(trainloader)}")

    # Test the network
    model.eval()  # switch to evaluation mode (good practice, though SimpleNN has no dropout/batch norm)
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in testloader:
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print('\nPyTorch - Test accuracy: %d %%' % (100 * correct / total))

if __name__ == "__main__":
    print("Running Keras & TensorFlow example:")
    keras_tensorflow_example()
    print("\nRunning PyTorch example:")
    pytorch_example()
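
As a quick, hedged follow-up (not part of the original notebook): if the body of keras_tensorflow_example is run at top level so that model and the test arrays stay in scope, a single prediction could look like this (the np alias is an assumption):

import numpy as np

# Sketch: classify the first test digit with the trained Keras model
probs = model.predict(x_test[:1])            # shape (1, 10): softmax probabilities
print('Predicted:', np.argmax(probs, axis=1)[0], '| true label:', y_test[0])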

Running Keras & TensorFlow example:


Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/mnist.npz
11490434/11490434 [==============================] - 0s 0us/step
Epoch 1/5
1875/1875 [==============================] - 8s 4ms/step - loss: 0.2945 - accuracy: 0.9146
Epoch 2/5
1875/1875 [==============================] - 6s 3ms/step - loss: 0.1436 - accuracy: 0.9572
Epoch 3/5
1875/1875 [==============================] - 7s 4ms/step - loss: 0.1070 - accuracy: 0.9674
Epoch 4/5
1875/1875 [==============================] - 8s 4ms/step - loss: 0.0884 - accuracy: 0.9732
Epoch 5/5
1875/1875 [==============================] - 7s 4ms/step - loss: 0.0745 - accuracy: 0.9771
313/313 [==============================] - 1s 2ms/step - loss: 0.0751 - accuracy: 0.9771

Keras & TensorFlow - Test accuracy: 0.9771000146865845

Running PyTorch example:


Downloading http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz
Failed to download (trying next):
HTTP Error 403: Forbidden

Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz
Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-images-idx3-ubyte.gz to mnist_data/MNIST/raw/train-images-idx3-ubyte.gz
100%|██████████| 9912422/9912422 [00:00<00:00, 16153635.93it/s]
Extracting mnist_data/MNIST/raw/train-images-idx3-ubyte.gz to mnist_data/MNIST/raw

Downloading http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz
Failed to download (trying next):
HTTP Error 403: Forbidden

Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz
Downloading https://ossci-datasets.s3.amazonaws.com/mnist/train-labels-idx1-ubyte.gz to mnist_data/MNIST/raw/train-labels-idx1-ubyte.gz
100%|██████████| 28881/28881 [00:00<00:00, 484952.07it/s]
Extracting mnist_data/MNIST/raw/train-labels-idx1-ubyte.gz to mnist_data/MNIST/raw

Downloading http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz
Failed to download (trying next):
HTTP Error 403: Forbidden

Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz
Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-images-idx3-ubyte.gz to mnist_data/MNIST/raw/t10k-images-idx3-ubyte.gz
100%|██████████| 1648877/1648877 [00:00<00:00, 4553076.76it/s]
Extracting mnist_data/MNIST/raw/t10k-images-idx3-ubyte.gz to mnist_data/MNIST/raw

Downloading http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz
Failed to download (trying next):
HTTP Error 403: Forbidden

Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz
Downloading https://ossci-datasets.s3.amazonaws.com/mnist/t10k-labels-idx1-ubyte.gz to mnist_data/MNIST/raw/t10k-labels-idx1-ubyte.gz
100%|██████████| 4542/4542 [00:00<00:00, 8981861.75it/s]
Extracting mnist_data/MNIST/raw/t10k-labels-idx1-ubyte.gz to mnist_data/MNIST/raw

Epoch 1 - Training loss: 0.382502045951037
Epoch 2 - Training loss: 0.19840545332365073
Epoch 3 - Training loss: 0.1431313604315016
Epoch 4 - Training loss: 0.11595709192286581

# Applying Convolutional Neural Networks to computer vision problems


# Import necessary libraries
import tensorflow as tf
from tensorflow import keras
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms

# Function to use Keras and TensorFlow
def keras_tensorflow_cnn_example():
    # Load the CIFAR-10 dataset
    (x_train, y_train), (x_test, y_test) = keras.datasets.cifar10.load_data()
    x_train, x_test = x_train / 255.0, x_test / 255.0

    # Build the CNN model using Keras Sequential API
    model = keras.models.Sequential([
        keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=(32, 32, 3)),
        keras.layers.MaxPooling2D((2, 2)),
        keras.layers.Conv2D(64, (3, 3), activation='relu'),
        keras.layers.MaxPooling2D((2, 2)),
        keras.layers.Conv2D(64, (3, 3), activation='relu'),
        keras.layers.Flatten(),
        keras.layers.Dense(64, activation='relu'),
        keras.layers.Dense(10, activation='softmax')
    ])

    # Compile the model
    model.compile(optimizer='adam',
                  loss='sparse_categorical_crossentropy',
                  metrics=['accuracy'])

    # Train the model
    model.fit(x_train, y_train, epochs=10, validation_data=(x_test, y_test))

    # Evaluate the model
    test_loss, test_acc = model.evaluate(x_test, y_test)
    print('\nKeras & TensorFlow - Test accuracy:', test_acc)

# Function to use PyTorch
def pytorch_cnn_example():
    # Define a transform to normalize the data (per channel, maps [0, 1] to [-1, 1])
    transform = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])

    # Download and load the training data
    trainset = datasets.CIFAR10('cifar10_data/', download=True, train=True, transform=transform)
    trainloader = torch.utils.data.DataLoader(trainset, batch_size=64, shuffle=True)

    # Download and load the test data
    testset = datasets.CIFAR10('cifar10_data/', download=True, train=False, transform=transform)
    testloader = torch.utils.data.DataLoader(testset, batch_size=64, shuffle=False)

    # Define a simple CNN
    class SimpleCNN(nn.Module):
        def __init__(self):
            super(SimpleCNN, self).__init__()
            self.conv1 = nn.Conv2d(3, 32, 3, padding=1)
            self.pool = nn.MaxPool2d(2, 2)
            self.conv2 = nn.Conv2d(32, 64, 3, padding=1)
            self.conv3 = nn.Conv2d(64, 64, 3, padding=1)
            self.fc1 = nn.Linear(64 * 4 * 4, 64)  # 32x32 input halved by three poolings -> 4x4
            self.fc2 = nn.Linear(64, 10)

        def forward(self, x):
            x = self.pool(torch.relu(self.conv1(x)))
            x = self.pool(torch.relu(self.conv2(x)))
            x = self.pool(torch.relu(self.conv3(x)))
            x = x.view(-1, 64 * 4 * 4)
            x = torch.relu(self.fc1(x))
            x = self.fc2(x)
            return x

    # Initialize the network, loss function and optimizer
    model = SimpleCNN()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)

    # Train the network
    for epoch in range(10):
        running_loss = 0.0
        for images, labels in trainloader:
            optimizer.zero_grad()
            outputs = model(images)
            loss = criterion(outputs, labels)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        print(f"Epoch {epoch + 1}, Loss: {running_loss / len(trainloader)}")

    # Test the network
    model.eval()  # evaluation mode before inference
    correct = 0
    total = 0
    with torch.no_grad():
        for images, labels in testloader:
            outputs = model(images)
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()

    print('\nPyTorch - Test accuracy: %d %%' % (100 * correct / total))

if __name__ == "__main__":
    print("Running Keras & TensorFlow CNN example:")
    keras_tensorflow_cnn_example()
    print("\nRunning PyTorch CNN example:")
    pytorch_cnn_example()
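
A hedged extension, not in the original notebook: per-class accuracy for the PyTorch CNN, assuming model and testloader from pytorch_cnn_example are available at top level; the class names below are the standard CIFAR-10 ordering.

classes = ['airplane', 'automobile', 'bird', 'cat', 'deer',
           'dog', 'frog', 'horse', 'ship', 'truck']
correct_per_class = [0] * 10
total_per_class = [0] * 10

model.eval()
with torch.no_grad():
    for images, labels in testloader:
        _, predicted = torch.max(model(images), 1)
        for label, pred in zip(labels, predicted):
            total_per_class[label] += 1
            correct_per_class[label] += int(label == pred)

for name, c, t in zip(classes, correct_per_class, total_per_class):
    print(f'{name}: {100 * c / t:.1f} %')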

Running Keras & TensorFlow CNN example:


Downloading data from https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz
170498071/170498071 [==============================] - 6s 0us/step
Epoch 1/10
1563/1563 [==============================] - 73s 46ms/step - loss: 1.5518 - accuracy: 0.4314 - val_loss: 1.3380 - val_accuracy: 0.51
Epoch 2/10
1563/1563 [==============================] - 68s 44ms/step - loss: 1.2141 - accuracy: 0.5686 - val_loss: 1.1535 - val_accuracy: 0.58
Epoch 3/10
1563/1563 [==============================] - 68s 44ms/step - loss: 1.0662 - accuracy: 0.6232 - val_loss: 1.0307 - val_accuracy: 0.63
Epoch 4/10
1563/1563 [==============================] - 66s 42ms/step - loss: 0.9662 - accuracy: 0.6601 - val_loss: 0.9972 - val_accuracy: 0.64
Epoch 5/10
1563/1563 [==============================] - 68s 44ms/step - loss: 0.8952 - accuracy: 0.6860 - val_loss: 0.9389 - val_accuracy: 0.67
Epoch 6/10
1563/1563 [==============================] - 66s 42ms/step - loss: 0.8384 - accuracy: 0.7061 - val_loss: 0.9665 - val_accuracy: 0.66
Epoch 7/10
1563/1563 [==============================] - 67s 43ms/step - loss: 0.7906 - accuracy: 0.7218 - val_loss: 0.9274 - val_accuracy: 0.67
Epoch 8/10
1563/1563 [==============================] - 67s 43ms/step - loss: 0.7469 - accuracy: 0.7382 - val_loss: 0.9037 - val_accuracy: 0.69
Epoch 9/10
1563/1563 [==============================] - 69s 44ms/step - loss: 0.7115 - accuracy: 0.7495 - val_loss: 0.9128 - val_accuracy: 0.69
Epoch 10/10
1563/1563 [==============================] - 68s 43ms/step - loss: 0.6708 - accuracy: 0.7644 - val_loss: 0.9442 - val_accuracy: 0.67
313/313 [==============================] - 3s 11ms/step - loss: 0.9442 - accuracy: 0.6786

Keras & TensorFlow - Test accuracy: 0.678600013256073

Running PyTorch CNN example:


Downloading https://www.cs.toronto.edu/~kriz/cifar-10-python.tar.gz to cifar10_data/cifar-10-python.tar.gz
100%|██████████| 170498071/170498071 [00:05<00:00, 31721964.37it/s]
Extracting cifar10_data/cifar-10-python.tar.gz to cifar10_data/
Files already downloaded and verified
Epoch 1, Loss: 1.5147856874843997
Epoch 2, Loss: 1.13315472814738
Epoch 3, Loss: 0.962305731023364
Epoch 4, Loss: 0.8520413751492415
Epoch 5, Loss: 0.7630796948128649
Epoch 6, Loss: 0.7005950398838429
Epoch 7, Loss: 0.6443915752422474
Epoch 8, Loss: 0.5941478302487937
Epoch 9, Loss: 0.5554819284650065
Epoch 10, Loss: 0.5147728732098704

PyTorch - Test accuracy: 73 %

# Image classification on the MNIST dataset (CNN model with fully connected layers)
import tensorflow as tf
from tensorflow.keras import layers, models
import matplotlib.pyplot as plt

# Load and preprocess the MNIST dataset
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()

# Normalize the images to the range [0, 1]
x_train, x_test = x_train / 255.0, x_test / 255.0

# Reshape the data to include a channel dimension (for grayscale)
x_train = x_train.reshape((x_train.shape[0], 28, 28, 1))
x_test = x_test.reshape((x_test.shape[0], 28, 28, 1))

# Define the CNN model
model = models.Sequential([
    layers.Conv2D(32, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(10, activation='softmax')
])

# Compile the model
model.compile(
    optimizer='adam',
    loss='sparse_categorical_crossentropy',
    metrics=['accuracy']
)

# Train the model
history = model.fit(
    x_train, y_train,
    epochs=10,
    validation_data=(x_test, y_test)
)

# Evaluate the model
test_loss, test_acc = model.evaluate(x_test, y_test)
print('Test accuracy:', test_acc)

# Plot training and validation accuracy over epochs
plt.plot(history.history['accuracy'], label='accuracy')
plt.plot(history.history['val_accuracy'], label='val_accuracy')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.ylim([0.5, 1])
plt.legend(loc='lower right')
plt.show()
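
As an optional, hedged addition (not an original cell): visualizing a few test digits with the CNN's predictions, reusing the model, x_test, and plt defined above; the numpy import is an assumption.

import numpy as np

# Sketch: show the first five test images with predicted labels
preds = np.argmax(model.predict(x_test[:5]), axis=1)
fig, axes = plt.subplots(1, 5, figsize=(10, 2))
for ax, img, pred in zip(axes, x_test[:5], preds):
    ax.imshow(img.squeeze(), cmap='gray')   # drop the channel axis for display
    ax.set_title(f'pred: {pred}')
    ax.axis('off')
plt.show()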
Epoch 1/10
1875/1875 [==============================] - 58s 31ms/step - loss: 0.1381 - accuracy: 0.9579 - val_loss: 0.0524 - val_accuracy: 0.98
Epoch 2/10
1875/1875 [==============================] - 59s 32ms/step - loss: 0.0461 - accuracy: 0.9862 - val_loss: 0.0324 - val_accuracy: 0.98
Epoch 3/10
1875/1875 [==============================] - 56s 30ms/step - loss: 0.0318 - accuracy: 0.9902 - val_loss: 0.0319 - val_accuracy: 0.99
Epoch 4/10
1875/1875 [==============================] - 56s 30ms/step - loss: 0.0253 - accuracy: 0.9923 - val_loss: 0.0286 - val_accuracy: 0.99
Epoch 5/10
1875/1875 [==============================] - 55s 29ms/step - loss: 0.0198 - accuracy: 0.9935 - val_loss: 0.0306 - val_accuracy: 0.99
Epoch 6/10
1875/1875 [==============================] - 55s 29ms/step - loss: 0.0153 - accuracy: 0.9949 - val_loss: 0.0346 - val_accuracy: 0.99
Epoch 7/10
1875/1875 [==============================] - 51s 27ms/step - loss: 0.0130 - accuracy: 0.9957 - val_loss: 0.0286 - val_accuracy: 0.99
Epoch 8/10
1875/1875 [==============================] - 53s 28ms/step - loss: 0.0119 - accuracy: 0.9962 - val_loss: 0.0303 - val_accuracy: 0.99
Epoch 9/10
1875/1875 [==============================] - 53s 28ms/step - loss: 0.0084 - accuracy: 0.9970 - val_loss: 0.0368 - val_accuracy: 0.99
Epoch 10/10
1875/1875 [==============================] - 53s 28ms/step - loss: 0.0088 - accuracy: 0.9972 - val_loss: 0.0326 - val_accuracy: 0.99
313/313 [==============================] - 4s 11ms/step - loss: 0.0326 - accuracy: 0.9926
Test accuracy: 0.9926000237464905

# Applying deep learning models in the field of Natural Language Processing
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Embedding, SpatialDropout1D
from tensorflow.keras.preprocessing.sequence import pad_sequences

# Parameters for data preprocessing and model
max_features = 5000   # Number of words to consider as features
maxlen = 100          # Max length of each review (pad/truncate to this length)
batch_size = 64
embedding_dims = 128
epochs = 10

# Load the IMDB dataset
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# Pad sequences to maxlen
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)

# Define the LSTM model
model = Sequential()
model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
model.add(SpatialDropout1D(0.2))
model.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
model.add(Dense(1, activation='sigmoid'))

# Compile the model
model.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy']
)

# Print model summary
print(model.summary())

# Train the model
history = model.fit(
    x_train, y_train,
    epochs=epochs,
    batch_size=batch_size,
    validation_data=(x_test, y_test)
)

# Evaluate the model
score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
print(f'Test score: {score}')
print(f'Test accuracy: {acc}')
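
A hedged aside, not in the original notebook: the integer-encoded reviews can be decoded back to words with imdb.get_word_index(); Keras reserves indices 0-2 for padding, start, and unknown tokens, hence the offset of 3.

# Sketch: decode the first (padded) training review back to approximate text
word_index = imdb.get_word_index()
index_to_word = {i + 3: w for w, i in word_index.items()}
index_to_word.update({0: '<pad>', 1: '<start>', 2: '<unk>'})
print(' '.join(index_to_word.get(i, '<unk>') for i in x_train[0]))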

Downloading data from https://storage.googleapis.com/tensorflow/tf-keras-datasets/imdb.npz
17464789/17464789 [==============================] - 0s 0us/step
Model: "sequential_3"
_________________________________________________________________
 Layer (type)                          Output Shape         Param #
=================================================================
 embedding (Embedding)                 (None, 100, 128)     640000

 spatial_dropout1d (SpatialDropout1D)  (None, 100, 128)     0

 lstm (LSTM)                           (None, 100)          91600

 dense_6 (Dense)                       (None, 1)            101

=================================================================
Total params: 731701 (2.79 MB)
Trainable params: 731701 (2.79 MB)
Non-trainable params: 0 (0.00 Byte)
_________________________________________________________________
None
Epoch 1/10
391/391 [==============================] - 144s 359ms/step - loss: 0.4389 - accuracy: 0.7853 - val_loss: 0.3759 - val_accuracy: 0.83
Epoch 2/10
391/391 [==============================] - 138s 353ms/step - loss: 0.3067 - accuracy: 0.8711 - val_loss: 0.3505 - val_accuracy: 0.84
Epoch 3/10
391/391 [==============================] - 141s 362ms/step - loss: 0.2599 - accuracy: 0.8931 - val_loss: 0.3649 - val_accuracy: 0.84
Epoch 4/10
391/391 [==============================] - 140s 359ms/step - loss: 0.2284 - accuracy: 0.9084 - val_loss: 0.3927 - val_accuracy: 0.84
Epoch 5/10
391/391 [==============================] - 142s 363ms/step - loss: 0.1954 - accuracy: 0.9233 - val_loss: 0.4011 - val_accuracy: 0.83
Epoch 6/10
391/391 [==============================] - 171s 438ms/step - loss: 0.1737 - accuracy: 0.9328 - val_loss: 0.4749 - val_accuracy: 0.83
Epoch 7/10
391/391 [==============================] - 167s 427ms/step - loss: 0.1551 - accuracy: 0.9412 - val_loss: 0.4344 - val_accuracy: 0.83
Epoch 8/10
391/391 [==============================] - 145s 371ms/step - loss: 0.1363 - accuracy: 0.9494 - val_loss: 0.4848 - val_accuracy: 0.83
Epoch 9/10
391/391 [==============================] - 148s 380ms/step - loss: 0.1133 - accuracy: 0.9587 - val_loss: 0.5155 - val_accuracy: 0.83
Epoch 10/10
391/391 [==============================] - 144s 369ms/step - loss: 0.1109 - accuracy: 0.9608 - val_loss: 0.5288 - val_accuracy: 0.83
391/391 [==============================] - 22s 55ms/step - loss: 0.5288 - accuracy: 0.8346
Test score: 0.5288125872612
Test accuracy: 0.8345999717712402
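
As a sanity check on the model summary above, the LSTM layer's 91,600 parameters follow from four gates, each with input weights, recurrent weights, and a bias; a quick calculation:

# LSTM parameter count: 4 gates x (input weights + recurrent weights + biases)
embedding_dim, units = 128, 100
print(4 * (embedding_dim * units + units * units + units))  # 91600
# Embedding layer: vocabulary size x embedding width
print(5000 * 128)  # 640000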

# Train a sentiment analysis model on the IMDB dataset using RNN layers with LSTM/GRU units
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, GRU, Dense, SpatialDropout1D

# Parameters for data preprocessing and model
max_features = 5000   # Number of words to consider as features
maxlen = 100          # Max length of each review (pad/truncate to this length)
batch_size = 64
embedding_dims = 128
epochs = 10

# Load the IMDB dataset
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)

# Pad sequences to maxlen
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)

# Define the RNN model with LSTM
model_lstm = Sequential()
model_lstm.add(Embedding(max_features, embedding_dims, input_length=maxlen))
model_lstm.add(SpatialDropout1D(0.2))
model_lstm.add(LSTM(100, dropout=0.2, recurrent_dropout=0.2))
model_lstm.add(Dense(1, activation='sigmoid'))

# Compile the model
model_lstm.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy']
)

# Print model summary
print("LSTM Model Summary:")
print(model_lstm.summary())
print()

# Train the LSTM model
history_lstm = model_lstm.fit(
    x_train, y_train,
    epochs=epochs,
    batch_size=batch_size,
    validation_data=(x_test, y_test)
)

# Evaluate the LSTM model
score_lstm, acc_lstm = model_lstm.evaluate(x_test, y_test, batch_size=batch_size)
print(f'LSTM Test score: {score_lstm}')
print(f'LSTM Test accuracy: {acc_lstm}')
print()

# Define the RNN model with GRU
model_gru = Sequential()
model_gru.add(Embedding(max_features, embedding_dims, input_length=maxlen))
model_gru.add(SpatialDropout1D(0.2))
model_gru.add(GRU(100, dropout=0.2, recurrent_dropout=0.2))
model_gru.add(Dense(1, activation='sigmoid'))

# Compile the model
model_gru.compile(
    optimizer='adam',
    loss='binary_crossentropy',
    metrics=['accuracy']
)

# Print model summary
print("GRU Model Summary:")
print(model_gru.summary())
print()
# Train the GRU model
history_gru = model_gru.fit(
    x_train, y_train,
    epochs=epochs,
    batch_size=batch_size,
    validation_data=(x_test, y_test)
)

# Applying the Autoencoder algorithms for encoding the real-world data
import tensorflow as tf
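
The scraped notebook cuts off at the start of the autoencoder section. What follows is only a minimal sketch of a dense autoencoder on MNIST in the same style, not the original cell; the layer sizes, epochs, and variable names are all assumptions:

import tensorflow as tf
from tensorflow.keras import layers, models

# Load MNIST and flatten each image into a 784-dimensional vector in [0, 1]
(x_train, _), (x_test, _) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(-1, 784).astype('float32') / 255.0
x_test = x_test.reshape(-1, 784).astype('float32') / 255.0

# Encoder compresses to a 32-dimensional code; decoder reconstructs the input
autoencoder = models.Sequential([
    layers.Dense(128, activation='relu', input_shape=(784,)),
    layers.Dense(32, activation='relu'),      # bottleneck code
    layers.Dense(128, activation='relu'),
    layers.Dense(784, activation='sigmoid')   # reconstruction in [0, 1]
])

autoencoder.compile(optimizer='adam', loss='binary_crossentropy')
autoencoder.fit(x_train, x_train, epochs=10, batch_size=256,
                validation_data=(x_test, x_test))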
