EXPERIMENT-1:
Implement a multilayer perceptron algorithm for MNIST Handwritten Digit Classification.
Aim: Implement a multilayer perceptron algorithm for MNIST Handwritten Digit Classification.
PROGRAM:
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import mnist
import numpy as np
import matplotlib.pyplot as plt
# Load and preprocess the MNIST dataset
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images, test_images = train_images / 255.0, test_images / 255.0
# Flatten the images and one-hot encode the labels
train_images = train_images.reshape((60000, 28 * 28))
test_images = test_images.reshape((10000, 28 * 28))
train_labels = tf.keras.utils.to_categorical(train_labels)
test_labels = tf.keras.utils.to_categorical(test_labels)
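# For reference, to_categorical maps an integer label to a one-hot row; e.g.
# tf.keras.utils.to_categorical([3], num_classes=10) gives
# [[0. 0. 0. 1. 0. 0. 0. 0. 0. 0.]]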
# Define the MLP model
model = models.Sequential()
model.add(layers.Dense(128, activation='relu', input_shape=(28 * 28,)))
model.add(layers.Dropout(0.2))
model.add(layers.Dense(10, activation='softmax'))
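# Parameter count: the hidden layer holds 784*128 + 128 = 100,480 weights and the
# output layer 128*10 + 10 = 1,290, i.e. 101,770 in total (visible via model.summary()).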
# Compile the model
model.compile(optimizer='adam',
              loss='categorical_crossentropy',
              metrics=['accuracy'])
# Train the model
model.fit(train_images, train_labels, epochs=5, batch_size=64, validation_split=0.2)
# Evaluate the model on the test set
test_loss, test_acc = model.evaluate(test_images, test_labels)
print(f'Test accuracy: {test_acc}')
# Make predictions on some test samples
num_samples = 5
sample_indices = np.random.choice(test_images.shape[0], num_samples, replace=False)
samples_to_predict = test_images[sample_indices]
predictions = model.predict(samples_to_predict)
# Display the predictions with images
for i in range(num_samples):
    predicted_label = np.argmax(predictions[i])
    true_label = np.argmax(test_labels[sample_indices[i]])
    # Reshape the flattened image back to (28, 28)
    sample_image = samples_to_predict[i].reshape((28, 28))
    plt.subplot(1, num_samples, i + 1)
    plt.imshow(sample_image, cmap='gray')
    plt.title(f"True: {true_label}\nPredicted: {predicted_label}")
    plt.axis('off')
plt.show()
OUTPUT:
EXPERIMENT-2:
Design a neural network for classifying movie reviews (binary classification) using the IMDB dataset.
Aim: Design a neural network for classifying movie reviews (binary classification) using the IMDB dataset.
PROGRAM:
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, Flatten, Dense
from tensorflow.keras.preprocessing.sequence import pad_sequences
# Load the IMDb dataset and preprocess it
max_features, maxlen = 10000, 100
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=max_features)
train_data = pad_sequences(train_data, maxlen=maxlen)
test_data = pad_sequences(test_data, maxlen=maxlen)
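# Note: pad_sequences makes every review exactly maxlen tokens long; by default
# it pads/truncates at the front, e.g. pad_sequences([[1, 2, 3]], maxlen=5)
# gives [[0 0 1 2 3]].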
# Define, compile, and train the model
model = Sequential([
    Embedding(max_features, 8, input_length=maxlen),
    Flatten(),
    Dense(1, activation='sigmoid')
])
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])
model.fit(train_data, train_labels, epochs=10, batch_size=32, validation_split=0.2)
# Evaluate and print the model's accuracy
print(f'Test Accuracy: {model.evaluate(test_data, test_labels)[1] * 100:.2f}%')
# Function to process and predict the sentiment of a raw review
import re
word_index = imdb.get_word_index()
def predict_sentiment(review, word_index, model, maxlen):
    # Keras's IMDB encoding reserves indices 0-2 (padding/start/unknown) and
    # offsets the raw word_index values by 3, so shift the tokens to match and
    # map out-of-vocabulary words to the unknown index 2
    words = re.findall(r"[a-z']+", review.lower())
    tokens = [word_index.get(w, -1) + 3 for w in words]
    tokens = [t if 3 <= t < max_features else 2 for t in tokens]
    tokens = pad_sequences([tokens], maxlen=maxlen)
    return model.predict(tokens)[0, 0]
# Predict sentiment for sample reviews
positive_review = "This movie was fantastic! The acting and plot were amazing."
negative_review = "I hated this movie. The acting was terrible, and the plot made no sense."
positive_prediction = predict_sentiment(positive_review, word_index, model, maxlen)
negative_prediction = predict_sentiment(negative_review, word_index, model, maxlen)
print(f' Positive Review Prediction: {positive_prediction:.4f}')
print(f' Negative Review Prediction: {negative_prediction:.4f}')
OUTPUT :
Test Accuracy: 84.87%
Positive Review Prediction: 0.2144
Negative Review Prediction: 0.4197
EXPERIMENT-3:
Design a neural network for classifying newswires (multi-class classification) using the Reuters dataset.
Aim: Design a neural network for classifying newswires (multi-class classification) using the Reuters dataset.
PROGRAM :
import numpy as np
import tensorflow as tf
from tensorflow.keras.datasets import reuters
from tensorflow.keras import models, layers
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
# Load the dataset
(train_data, train_labels), (test_data, test_labels) = reuters.load_data(num_words=10000)
# Function to vectorize sequences
def vectorize_sequences(sequences, dimension=10000):
    results = np.zeros((len(sequences), dimension))
    for i, sequence in enumerate(sequences):
        results[i, sequence] = 1.
    return results
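# For example, the sequence [3, 5] becomes a 10,000-dimensional multi-hot row
# with 1.0 at indices 3 and 5 and 0.0 everywhere else.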
# Vectorize data
x_train = vectorize_sequences(train_data)
x_test = vectorize_sequences(test_data)
# One-hot encode labels
one_hot_train_labels = to_categorical(train_labels)
one_hot_test_labels = to_categorical(test_labels)
# Reserve validation set
x_val = x_train[:1000]
partial_x_train = x_train[1000:]
y_val = one_hot_train_labels[:1000]
partial_y_train = one_hot_train_labels[1000:]
# Define and compile the model
model = models.Sequential([
    layers.Dense(64, activation='relu'),
    layers.Dense(64, activation='relu'),
    layers.Dense(46, activation='softmax')
])
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
history = model.fit(partial_x_train, partial_y_train, epochs=9,
                    batch_size=512, validation_data=(x_val, y_val))
# Function to plot the training history for loss and accuracy
def plot_history(history):
    acc = history.history['accuracy']
    val_acc = history.history['val_accuracy']
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(1, len(acc) + 1)
    # Plot training and validation loss
    plt.figure(figsize=(12, 5))
    plt.subplot(1, 2, 1)  # 1 row, 2 columns, first plot
    plt.plot(epochs, loss, 'bo', label='Training loss')
    plt.plot(epochs, val_loss, 'r', label='Validation loss')
    plt.title('Training and validation loss')
    plt.xlabel('Epochs')
    plt.ylabel('Loss')
    plt.legend()
    # Plot training and validation accuracy
    plt.subplot(1, 2, 2)  # 1 row, 2 columns, second plot
    plt.plot(epochs, acc, 'bo', label='Training accuracy')
    plt.plot(epochs, val_acc, 'r', label='Validation accuracy')
    plt.title('Training and validation accuracy')
    plt.xlabel('Epochs')
    plt.ylabel('Accuracy')
    plt.legend()
    plt.tight_layout()  # Adjust the layout so nothing overlaps
    plt.show()
# Plot the training and validation loss and accuracy
plot_history(history)
# Evaluate the model on test data
results = model.evaluate(x_test, one_hot_test_labels)
print(f"Test Loss: {results[0]}")
print(f"Test Accuracy: {results[1]}")
# Compare against a random baseline
test_labels_copy = test_labels.copy()
np.random.shuffle(test_labels_copy)
baseline_accuracy = np.sum(np.array(test_labels) == np.array(test_labels_copy)) / len(test_labels)
print(f"Random Baseline Accuracy: {baseline_accuracy}")
# Make predictions
predictions = model.predict(x_test)
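# Each row of `predictions` holds 46 class probabilities summing to 1; the
# predicted topic is the index of the largest entry, e.g. for the first story:
print(f"Predicted class of first test newswire: {np.argmax(predictions[0])}")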
OUTPUT :
EXPERIMENT- 4:
Design a neural network for predicting house prices using the Boston Housing Price dataset.
Aim: Design a neural network for predicting house prices using the Boston Housing Price dataset.
PROGRAM :
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense
import matplotlib.pyplot as plt
# Load data from CSV file
data = pd.read_csv('C:\\Users\\Asus\\OneDrive\\Documents\\3.2 sem\\DLTF LAB PROGRAMS\\HousingData.csv')
# Impute missing values: the CSV contains NA entries (see the dataset below),
# which StandardScaler and model.fit cannot handle
data = data.fillna(data.mean())
# Separate features and target variable
X = data.drop('MEDV', axis=1).values
y = data['MEDV'].values
# Normalize features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Split dataset into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)
# Define the model architecture
model = Sequential([
    Dense(64, activation='relu', input_shape=(X_train.shape[1],)),
    Dense(64, activation='relu'),
    Dense(1)  # Output layer with linear activation
])
# Compile the model
model.compile(optimizer='adam', loss='mean_squared_error')
# Train the model
history = model.fit(X_train, y_train, epochs=100, batch_size=32, validation_split=0.1)
# Evaluate the model
test_loss = model.evaluate(X_test, y_test)
print(f'Test Loss: {test_loss}')
# Predict house prices
y_pred = model.predict(X_test)
# Visualize predictions vs actual values
plt.scatter(y_test, y_pred)
plt.xlabel('True Prices')
plt.ylabel('Predicted Prices')
plt.title('Predicted Prices vs True Prices')
plt.show()
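Points close to the diagonal y = x correspond to accurate predictions; if desired, that reference line can be drawn before plt.show() with:
plt.plot([y_test.min(), y_test.max()], [y_test.min(), y_test.max()], 'r--')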
DATASET :
S.NO CRIM ZN INDUS CHAS NOX RM AGE DIS RAD TAX PTRATIO B LSTAT MEDV
1. 0.00632 18 2.31 0 0.538 6.575 65.2 4.09 1 296 15.3 396.9 4.98 24
2. 0.02731 18 7.07 0 0.469 6.421 78.9 4.9671 2 242 17.8 396.9 9.14 21.6
3. 0.02729 18 7.07 0 0.469 7.185 61.1 4.9671 2 242 17.8 392.83 4.03 34.7
4. 0.03237 18 2.18 0 0.458 6.998 45.8 6.0622 3 222 18.7 394.63 2.94 33.4
5. 0.06905 18 2.18 0 0.458 7.147 54.2 6.0622 3 222 18.7 396.9 NA 36.2
6. 0.02985 18 2.18 0 0.458 6.43 58.7 6.0622 3 222 18.7 394.12 5.21 28.7
7. 0.08829 18 7.87 NA 0.524 6.012 66.6 5.5605 5 311 15.2 395.6 12.43 22.9
8. 0.14455 18 7.87 0 0.524 6.172 96.1 5.9505 5 311 15.2 396.9 19.15 27.1
9. 0.21124 18 7.87 0 0.524 5.631 100 6.0821 5 311 15.2 386.63 29.93 16.5
10. 0.17004 18 7.87 NA 0.524 6.004 85.9 6.5921 5 311 15.2 386.71 17.1 18.9
11. 0.22489 18 7.87 0 0.524 6.377 94.3 6.3467 5 311 15.2 392.52 20.45 15
12. 0.11747 18 7.87 0 0.524 6.009 82.9 6.2267 5 311 15.2 396.9 13.27 18.9
13. 0.09378 18 7.87 0 0.524 5.889 39 5.4509 5 311 15.2 390.5 15.71 21.7
14. 0.62976 18 8.14 0 0.538 5.949 61.8 4.7075 4 307 21 396.9 8.26 20.4
15. 0.63796 18 8.14 NA 0.538 6.096 84.5 4.4619 4 307 21 380.02 10.26 18.2
16. 0.62739 18 8.14 0 0.538 5.834 56.5 4.4986 4 307 21 395.62 8.47 19.9
17. 1.05393 18 8.14 0 0.538 5.935 29.3 4.4986 4 307 21 386.85 6.58 23.1
18. 0.7842 18 8.14 0 0.538 5.99 81.7 4.2579 4 307 21 386.75 14.67 17.5
19. 0.80271 18 8.14 0 0.538 5.456 36.6 3.7965 4 307 21 288.99 11.69 20.2
20. 0.7258 18 8.14 0 0.538 5.727 69.5 3.7965 4 307 21 390.95 11.28 18.2
21. 1.25179 18 8.14 0 0.538 5.57 98.1 3.7979 4 307 21 376.57 21.02 13.6
22. 0.85204 18 8.14 0 0.538 5.965 89.2 4.0123 4 307 21 392.53 13.83 19.6
23. 1.23247 18 8.14 0 0.538 6.142 91.7 3.9769 4 307 21 396.9 18.72 15.2
24. 0.98843 18 8.14 0 0.538 5.813 100 4.0952 4 307 21 394.54 19.88 14.5
25. 0.75026 18 8.14 0 0.538 5.924 94.1 4.3996 4 307 21 394.33 16.3 15.6
26. 0.84054 18 8.14 0 0.538 5.599 85.7 4.4546 4 307 21 303.42 16.51 13.9
27. 0.67191 18 8.14 0 0.538 5.813 90.3 4.682 4 307 21 376.88 14.81 16.6
28. 0.95577 18 8.14 0 0.538 6.047 88.8 4.4534 4 307 21 306.38 17.28 14.8
29. 0.77299 18 8.14 0 0.538 6.495 94.4 4.4547 4 307 21 387.94 12.8 18.4
30. 1.00245 18 8.14 0 0.538 6.674 87.3 4.239 4 307 21 380.23 11.98 21
31. 1.13081 18 8.14 0 0.538 5.713 94.1 4.233 4 307 21 360.17 22.6 12.7
32. 1.35472 18 8.14 0 0.538 6.072 100 4.175 4 307 21 376.73 13.04 14.5
33. 1.38799 18 8.14 0 0.538 5.95 82 3.99 4 307 21 232.6 27.71 13.2
34. 1.15172 18 8.14 0 0.538 5.701 95 3.7872 4 307 21 358.77 18.35 13.1
35. 1.61282 18 8.14 0 0.538 6.096 96.9 3.7598 4 307 21 248.31 20.34 13.5
36. 0.06417 18 5.96 0 0.499 5.933 68.2 3.3603 5 279 19.2 396.9 NA 18.9
37. 0.09744 18 NA 0 0.499 5.841 61.4 3.3779 5 279 19.2 377.56 11.41 20
38. 0.08014 18 5.96 0 0.499 5.85 41.5 3.9342 5 279 19.2 396.9 8.77 21
39. 0.17505 18 5.96 0 0.499 5.966 30.2 3.8473 5 279 19.2 393.43 10.13 24.7
40. 0.02763 18 2.95 0 0.428 6.595 21.8 5.4011 3 252 18.3 395.63 4.32 30.8
41. 0.03359 18 2.95 0 0.428 7.024 15.8 5.4011 3 252 18.3 395.62 1.98 34.9
42. 0.12744 18 6.91 0 0.448 6.77 2.9 5.7209 3 233 17.9 385.41 4.84 26.6
43. 0.1415 18 6.91 0 0.448 6.169 6.6 5.7209 3 233 17.9 383.37 5.81 25.3
44. 0.15936 18 6.91 NA 0.448 6.211 6.5 5.7209 3 233 17.9 394.46 7.44 24.7
45. 0.12269 18 6.91 0 0.448 6.069 40 5.7209 3 233 17.9 389.39 9.55 21.2
46. 0.17142 18 6.91 0 0.448 5.682 33.8 5.1004 3 233 17.9 396.9 10.21 19.3
47. 0.18836 18 6.91 0 0.448 5.786 33.3 5.1004 3 233 17.9 396.9 14.15 20
48. 0.22927 18 NA 0 0.448 6.03 85.5 5.6894 3 233 17.9 392.74 18.8 16.6
49. 0.25387 18 6.91 0 0.448 5.399 95.3 5.87 3 233 17.9 396.9 30.81 14.4
OUTPUT :
EXPERIMENT- 5:
Build a Convolutional Neural Network for MNIST Handwritten Digit Classification.
Aim: Build a Convolutional Neural Network for MNIST Handwritten Digit Classification.
PROGRAM:
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.datasets import mnist
# Load and preprocess the MNIST dataset
(train_images, train_labels), (test_images, test_labels) = mnist.load_data()
train_images = train_images.reshape((60000, 28, 28, 1)).astype('float32') / 255
test_images = test_images.reshape((10000, 28, 28, 1)).astype('float32') / 255
# Define the CNN model with reduced complexity
model = models.Sequential([
    layers.Conv2D(16, (3, 3), activation='relu', input_shape=(28, 28, 1)),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(32, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(32, activation='relu'),
    layers.Dense(10, activation='softmax')
])
# Compile the model
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',  # labels stay as integers, no one-hot encoding needed
              metrics=['accuracy'])
# Define early stopping callback
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=2)
# Train the model with increased batch size and early stopping
model.fit(train_images, train_labels, epochs=6, batch_size=128,
          validation_split=0.2, callbacks=[early_stopping])
# Evaluate the model
test_loss, test_acc = model.evaluate(test_images, test_labels)
print('Test accuracy:', test_acc)
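# Optional check: predict the digit for one test image (labels here are plain
# integers, so no argmax is needed on the true label).
pred = model.predict(test_images[:1])
print('Predicted digit:', pred.argmax(), '| true digit:', test_labels[0])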
OUTPUT :
EXPERIMENT- 6:
Build a Convolutional Neural Network for simple image (dogs and cats) classification.
Aim: Build a Convolutional Neural Network for simple image (dogs and cats) classification.
PROGRAM:
import tensorflow as tf
from tensorflow import keras
from keras import Sequential
from keras.layers import Dense
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
import matplotlib.pyplot as plt
import numpy as np
# Load the Iris dataset
iris = datasets.load_iris()
X, y = iris.data, iris.target
# One-hot encode the labels
ohe = OneHotEncoder(sparse=False)  # scikit-learn >= 1.2 renames this argument to sparse_output
y = ohe.fit_transform(y.reshape(-1, 1))
# Split the dataset
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Normalize the dataset
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
# Define a simple neural network model
model = Sequential()
model.add(Dense(16, activation='relu', input_shape=(4,)))
model.add(Dense(8, activation='relu'))
model.add(Dense(3, activation='softmax'))
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
epochs = 50
history = model.fit(X_train, y_train, validation_data=(X_test, y_test), epochs=epochs, verbose=1)
# Plot training and validation accuracy
plt.plot(history.history['accuracy'], color='red', label='train')
plt.plot(history.history['val_accuracy'], color='blue', label='validation')
plt.legend()
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.title('Training vs Validation Accuracy')
plt.show()
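The listing above trains a dense classifier on the Iris data. For the dogs-and-cats task named in the Aim, a minimal CNN sketch is given below; it assumes (hypothetically) that training images are arranged in class subfolders data/train/cats and data/train/dogs, so adjust the path to your own dataset.
import tensorflow as tf
from tensorflow.keras import layers, models
# Build a labelled dataset from the directory structure (one subfolder per class)
train_ds = tf.keras.utils.image_dataset_from_directory(
    'data/train', image_size=(150, 150), batch_size=32, label_mode='binary')
cnn = models.Sequential([
    layers.Rescaling(1.0 / 255, input_shape=(150, 150, 3)),  # scale pixels to [0, 1]
    layers.Conv2D(32, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Conv2D(64, (3, 3), activation='relu'),
    layers.MaxPooling2D((2, 2)),
    layers.Flatten(),
    layers.Dense(64, activation='relu'),
    layers.Dense(1, activation='sigmoid')  # binary output: cat vs dog
])
cnn.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
cnn.fit(train_ds, epochs=5)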
OUTPUT:
EXPERIMENT- 7:
Use a pre-trained convolutional neural network (VGG16) for image classification.
Aim: Use a pre-trained convolutional neural network (VGG16) for image classification.
PROGRAM:
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16, preprocess_input, decode_predictions
from tensorflow.keras.preprocessing import image
import matplotlib.pyplot as plt
from PIL import Image
# Load pre-trained VGG16 model
model = VGG16(weights='imagenet')
# Function to preprocess the image for VGG16
def preprocess_image(image_path):
    img = image.load_img(image_path, target_size=(224, 224))  # VGG16 expects 224x224 RGB input
    img_array = image.img_to_array(img)
    img_array = np.expand_dims(img_array, axis=0)  # add a batch dimension
    img_array = preprocess_input(img_array)  # RGB->BGR conversion and ImageNet mean subtraction
    return img_array
# Function to make predictions
def predict(image_path):
    img_array = preprocess_image(image_path)
    predictions = model.predict(img_array)
    decoded_predictions = decode_predictions(predictions, top=3)[0]  # get top 3 predictions
    for i, (imagenet_id, label, score) in enumerate(decoded_predictions):
        print("{}. {}: {:.2f}%".format(i + 1, label, score * 100))
    # Display the image
    img = Image.open(image_path)
    plt.imshow(img)
    plt.axis('off')
    plt.show()
# Allow user to select an image
image_path = input("Enter the path of the image: ")
predict(image_path)
OUTPUT :
EXPERIMENT- 8:
Implement one hot encoding of words or characters.
Aim: Implement one hot encoding of words or characters.
PROGRAM:
import pandas as pd
import numpy as np
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import OneHotEncoder, StandardScaler
# Load the employee dataset
data = pd.read_csv('C:\\Users\\KOTES\\Downloads\\Employee.csv')
# Perform one-hot encoding for categorical features
categorical_cols = ['Education', 'City', 'PaymentTier', 'Gender']
data_encoded = pd.get_dummies(data, columns=categorical_cols, drop_first=True)
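# For reference, get_dummies replaces each categorical column with 0/1 indicator
# columns, e.g. a column with values 'A'/'B' becomes a single indicator column
# (the first category is dropped because drop_first=True).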
# Convert 'EverBenched' column to numeric before building the feature matrix
data_encoded['EverBenched'] = data_encoded['EverBenched'].apply(lambda x: 1 if x == 'Yes' else 0)
# Split the data into features and target
X = data_encoded.drop('LeaveOrNot', axis=1)
y = data_encoded['LeaveOrNot']
# Normalize numerical features
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)
# Split the data into training and testing sets
X_train, X_test, y_train, y_test = train_test_split(X_scaled, y, test_size=0.2, random_state=42)
# Build a simple deep learning model
model = tf.keras.models.Sequential([
    tf.keras.layers.Dense(64, activation='relu', input_shape=(X_scaled.shape[1],)),
    tf.keras.layers.Dense(32, activation='relu'),
    tf.keras.layers.Dense(1, activation='sigmoid')
])
# Compile the model
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train the model
model.fit(X_train, y_train, epochs=10, batch_size=32, validation_data=(X_test, y_test))
# Evaluate the model
loss, accuracy = model.evaluate(X_test, y_test)
print(f'Loss: {loss}, Accuracy: {accuracy}')
OUTPUT :
EXPERIMENT- 9:
Implement word embeddings for IMDB dataset.
Aim: Implement word embeddings for IMDB dataset.
PROGRAM:
import numpy as np
import tensorflow as tf
from tensorflow import keras
from matplotlib import pyplot as plt
# Set random seeds for reproducibility
np.random.seed(0xdeadbeef)
tf.random.set_seed(0xdeadbeef)
# Load the IMDB dataset
imdb = keras.datasets.imdb
num_words = 20000
(train_data, train_labels), (test_data, test_labels) = imdb.load_data(num_words=num_words)
# Preprocess the vocabulary
vocabulary = imdb.get_word_index()
vocabulary = {k:(v+3) for k,v in vocabulary.items()}
vocabulary["<PAD>"] = 0
vocabulary["<START>"] = 1
vocabulary["<UNK>"] = 2
vocabulary["<UNUSED>"] = 3
# Decode a review
index = {v: k for k, v in vocabulary.items()}
def decode_review(text):
    return ' '.join([index.get(i, '?') for i in text])
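# Example usage: print the first training review as text (before padding).
print(decode_review(train_data[0]))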
# Pad sequences
maxlen = 256
train_data = keras.preprocessing.sequence.pad_sequences(
    train_data, value=vocabulary["<PAD>"], padding='post', maxlen=maxlen)
test_data = keras.preprocessing.sequence.pad_sequences(
    test_data, value=vocabulary["<PAD>"], padding='post', maxlen=maxlen)
# Build the model
model = keras.Sequential([
    keras.layers.Embedding(len(vocabulary), 2, input_length=maxlen),
    keras.layers.Flatten(),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(5),
    keras.layers.Dense(1, activation='sigmoid')
])
model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
# Train the model
history = model.fit(train_data, train_labels, epochs=5, batch_size=100,
                    validation_data=(test_data, test_labels), verbose=1)
# Plot accuracy
def plot_accuracy(history, miny=None):
    acc = history.history['accuracy']
    test_acc = history.history['val_accuracy']
    epochs = range(len(acc))
    plt.plot(epochs, acc)
    plt.plot(epochs, test_acc)
    if miny:
        plt.ylim(miny, 1.0)
    plt.title('accuracy')
    plt.xlabel('epoch')
    plt.show()
plot_accuracy(history)
# Plot word embeddings
get_embed_out = keras.backend.function([model.layers[0].input], [model.layers[1].output])
layer_output = get_embed_out([test_data[0]])
words = layer_output[0]
plt.scatter(words[:,0], words[:,1])
review = ['great', 'brilliant','crap','bad', 'fantastic', 'movie', 'seagal']
enc_review = tf.constant([vocabulary[word] for word in review])
words = get_embed_out([enc_review])[0]
plt.scatter(words[:,0], words[:,1])
for i, txt in enumerate(review):
    plt.annotate(txt, (words[i, 0], words[i, 1]))
plt.show()
OUTPUT :
EXPERIMENT- 10:
Implement a Recurrent Neural Network for IMDB movie review classification problem.
Aim: Implement a Recurrent Neural Network for IMDB movie review classification problem.
PROGRAM:
from keras.models import Sequential
from keras.layers import Embedding, Dense, SimpleRNN
from keras.datasets import imdb
from keras.preprocessing import sequence
import matplotlib.pyplot as plt
max_word_range = 10000
# Loading the IMDB dataset
(input_train, output_train), (input_test, output_test) = imdb.load_data(num_words=max_word_range)
print(len(input_train), 'train sequences.')
print(len(input_test), 'test sequences.')
max_word_amount = 500
# Padding sequences to ensure uniform input size
input_train = sequence.pad_sequences(input_train, maxlen=max_word_amount)
input_test = sequence.pad_sequences(input_test, maxlen=max_word_amount)
print('input_train shape:', input_train.shape)
print('input_test shape:', input_test.shape)
# Building the RNN model
model = Sequential([
    Embedding(max_word_range, 32, input_length=max_word_amount),
    SimpleRNN(32),  # Using SimpleRNN for the recurrent layer
    Dense(1, activation='sigmoid')
])
model.summary()
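# Expected parameter counts: Embedding 10000 * 32 = 320,000; SimpleRNN
# 32 * (32 + 32 + 1) = 2,080 (input, recurrent, and bias weights); Dense 32 + 1 = 33.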
# Compiling the model
model.compile(optimizer='rmsprop',
              loss='binary_crossentropy',
              metrics=['accuracy'])
# Training the model
history = model.fit(input_train, output_train,
                    epochs=10,
                    batch_size=128,
                    validation_split=0.2)
# Extracting loss and accuracy from the history object
loss = history.history['loss']
acc = history.history['accuracy']  # 'acc' in older Keras versions
val_loss = history.history['val_loss']
val_acc = history.history['val_accuracy'] # Updated from 'val_acc' to 'val_accuracy'
epochs = range(1, len(loss) + 1)
# Plotting training and validation loss and accuracy
f, axarr = plt.subplots(1, 2, figsize=(12, 6))
# Loss plot
axarr[0].plot(epochs, loss, 'bo', label='Training loss')
axarr[0].plot(epochs, val_loss, 'b', label='Validation loss')
axarr[0].set_title('Training and validation loss')
axarr[0].legend()
# Accuracy plot
axarr[1].plot(epochs, acc, 'bo', label='Training accuracy')
axarr[1].plot(epochs, val_acc, 'b', label='Validation accuracy')
axarr[1].set_title('Training and validation accuracy')
axarr[1].legend()
plt.show()
# Evaluating the model on the test set
score = model.evaluate(input_test, output_test)
print('Test Loss:', score[0])
print('Test Accuracy:', score[1])
# Making predictions
predictions = model.predict(input_test[0:5])
true_labels = output_test[0:5]
print("0 is a negative comment, 1 is a positive comment.")
for i in range(len(predictions)):
    print("Prediction:", int(round(predictions[i][0])), ", True Label:", true_labels[i])
OUTPUT :