PROGRAM – 1
Implementation of Feed Forward Neural Network
a) Import necessary packages
b) Load the training and testing data
c) Define the network architecture using Keras
d) Train the model using SGD
e) Evaluate the network
f) Plot the training loss and accuracy
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import random
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dense(10, activation="softmax")
])
model.compile(optimizer="sgd", loss="sparse_categorical_crossentropy",
metrics=["accuracy"])
history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=5)
# Evaluate the network
test_loss, test_acc = model.evaluate(x_test, y_test)
print("Loss=%.3f" % test_loss)
print("Accuracy=%.3f" % test_acc)
# Predict on a randomly chosen test image
n = random.randint(0, 9999)
plt.imshow(x_test[n])
plt.show()
predicted_value = model.predict(x_test)
print("Predicted Value:", predicted_value[n].argmax())
# Plotting the training and validation accuracy
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["Train","Validation"],loc="upper left")
plt.show()
# Plotting the training and validation loss
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["Train","Validation"],loc="upper right")
plt.show()
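Optionally, the trained network can be saved and reloaded for later use. A minimal sketch (the file name is arbitrary; the native .keras format assumes a reasonably recent Keras version):
model.save("ffnn_mnist.keras")                          # persists architecture and weights
restored = keras.models.load_model("ffnn_mnist.keras")  # reload for inference
print(restored.evaluate(x_test, y_test))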
Output
PROGRAM – 2
Implementation of a simple neural network (XOR data) with forward and backward propagation using NumPy:
a) Define the training data and initialize the weights
b) Implement forward propagation and the cost function
c) Implement backpropagation and update the parameters with gradient descent
d) Plot the cost curve and print the network output
import numpy as np
import matplotlib.pyplot as plt
x=np.array([[0,1],[1,0],[1,1],[0,0]])
y=np.array([[1],[1],[0],[0]])
num_input=2
num_hidden=1
num_output=1
wxh=np.random.randn(num_input,num_hidden)
bh=np.zeros((1,num_hidden))
why=np.random.randn(num_hidden,num_output)
def sigmoid(z):
    return 1 / (1 + np.exp(-z))

def sigmoid_derivative(z):
    return sigmoid(z) * (1 - sigmoid(z))

def forward_prop(x, wxh, why):
    z1 = np.dot(x, wxh) + bh
    a1 = sigmoid(z1)
    z2 = np.dot(a1, why)
    return z1, a1, z2

def cost_function(y, y_hat):
    return 0.5 * np.sum((y - y_hat) ** 2)

def backward_prop(x, y, z1, a1, z2, why):
    m = y.shape[0]
    dl_dz2 = z2 - y
    dl_da1 = np.dot(dl_dz2, why.T)
    dl_dz1 = dl_da1 * sigmoid_derivative(z1)
    dj_dwhy = np.dot(a1.T, dl_dz2) / m
    dj_dwxh = np.dot(x.T, dl_dz1) / m
    dj_dbh = np.sum(dl_dz1, axis=0, keepdims=True) / m
    return dj_dwxh, dj_dwhy, dj_dbh
n=0.01
num_iterations = 5000
costs=[]
for i in range(num_iterations):
    z1, a1, z2 = forward_prop(x, wxh, why)
    y_hat = z2
    cost = cost_function(y, y_hat)
    costs.append(cost)
    dj_dwxh, dj_dwhy, dj_dbh = backward_prop(x, y, z1, a1, z2, why)
    wxh -= n * dj_dwxh
    why -= n * dj_dwhy
    bh -= n * dj_dbh
plt.grid()
plt.plot(range(num_iterations),costs)
plt.title('cost function')
plt.xlabel('Training Iterations')
plt.ylabel('cost')
plt.show()
z1,hidden_layer_activation,output=forward_prop(x,wxh,why)
print("output of the network:\n",output)
output of the network:
[[0.51499412]
[0.55763691]
[0.61069325]
[0.35298938]]
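For reference, the gradients computed in backward_prop follow from the squared-error cost above, with the output taken linearly as \hat{y} = z_2 (the code additionally averages the gradients over the m = 4 training samples):

\frac{\partial J}{\partial z_2} = z_2 - y, \qquad
\frac{\partial J}{\partial W_{hy}} = \frac{1}{m}\, a_1^{\top}(z_2 - y)

\frac{\partial J}{\partial z_1} = \bigl[(z_2 - y)\, W_{hy}^{\top}\bigr] \odot \sigma'(z_1), \qquad
\frac{\partial J}{\partial W_{xh}} = \frac{1}{m}\, x^{\top}\frac{\partial J}{\partial z_1}, \qquad
\frac{\partial J}{\partial b_h} = \frac{1}{m}\sum_{i}\Bigl(\frac{\partial J}{\partial z_1}\Bigr)_{i}

where z_1 = x W_{xh} + b_h, a_1 = \sigma(z_1), z_2 = a_1 W_{hy}, and \odot denotes element-wise multiplication.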
PROGRAM 3
Build the image classification model by dividing the model into four stages:
a. Loading and preprocessing the image data
b. Defining the model's architecture
c. Training the model
d. Estimating the model's performance
import tensorflow as tf
from tensorflow import keras
import matplotlib.pyplot as plt
import random
mnist = tf.keras.datasets.mnist
(x_train, y_train), (x_test, y_test) = mnist.load_data()
x_train = x_train / 255.0
x_test = x_test / 255.0
model = keras.Sequential([
    keras.layers.Flatten(input_shape=(28, 28)),
    keras.layers.Dense(128, activation="relu"),
    keras.layers.Dense(10, activation="softmax")
])
model.summary()
model.compile(optimizer="sgd",
loss="sparse_categorical_crossentropy",
metrics=['accuracy'])
history = model.fit(x_train, y_train, validation_data=(x_test, y_test), epochs=3)
# Evaluate the model on the test data
test_loss, test_accuracy = model.evaluate(x_test, y_test, verbose=2)
print(f"Test Accuracy: {test_accuracy * 100:.2f}%")
print(f"Test Loss: {test_loss:.4f}")
# Plotting the training and validation accuracy
plt.figure(figsize=(12, 6))
plt.subplot(1, 2, 1)
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Training and Validation Accuracy')
plt.xlabel('Epochs')
plt.ylabel('Accuracy')
plt.legend()
# Plotting the training and validation loss
plt.subplot(1, 2, 2)
plt.plot(history.history['loss'], label='Training Loss')
plt.plot(history.history['val_loss'], label='Validation Loss')
plt.title('Training and Validation Loss')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
plt.show()
# Making predictions on random test images
num_images = 5
random_indices = random.sample(range(x_test.shape[0]), num_images)
for idx in random_indices:
    plt.imshow(x_test[idx], cmap=plt.cm.binary)
    plt.title(f"Predicted Label: {model.predict(x_test[idx:idx+1]).argmax()}\nTrue Label: {y_test[idx]}")
    plt.axis('off')
    plt.show()
# You can also use the model to predict on new data using the `model.predict()` method.
OUTPUT
val_accuracy: 0.9259 - val_loss: 0.2610
313/313 - 0s - 941us/step - accuracy: 0.9259 - loss: 0.2610
Test Accuracy: 92.59%
Test Loss: 0.2610
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 255ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 26ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 31ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 32ms/step
1/1 ━━━━━━━━━━━━━━━━━━━━ 0s 26ms/step
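As the closing comment of the program notes, model.predict() can also be applied to new data. A minimal sketch (here x_test[0] merely stands in for any new 28x28 grayscale image scaled to [0, 1]):
import numpy as np
new_image = x_test[0]                              # stand-in for a new 28x28 image
probs = model.predict(new_image[np.newaxis, ...])  # the model expects a batch dimension
print("Predicted digit:", probs.argmax())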
PROGRAM 4
Write a program to implement a simple CNN
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
# Define the input image tensor
image = torch.tensor([
[0., 0., 0., 0., 0.],
[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 1., 1., 1., 0.],
[0., 0., 0., 0., 0.]
])
# Reshape the image to match (N, C, H, W)
image = image.unsqueeze(0).unsqueeze(0) # shape: (1, 1, 5, 5)
# Define the kernel (e.g., Laplacian filter)
kernel = torch.tensor([
[[
[0., 1., 0.],
[1., -4., 1.],
[0., 1., 0.]
]]
])
# Apply convolution
conv = F.conv2d(image, kernel, stride=1, padding=0)
# Apply max pooling
pooled = F.max_pool2d(conv, kernel_size=2, stride=2)
# Plotting
fig, ax = plt.subplots(1, 3, figsize=(15, 5))
# Original Image
ax[0].imshow(image.squeeze().numpy(), cmap='gray')
ax[0].set_title('Input Image')
ax[0].axis('off')
# After Convolution
ax[1].imshow(conv.squeeze().detach().numpy(), cmap='gray')
ax[1].set_title('After Convolution')
ax[1].axis('off')
# After Max Pooling
pooled_image = pooled.squeeze(0).squeeze(0)
ax[2].imshow(pooled_image.detach().numpy(), cmap='gray')
ax[2].set_title('After Max Pooling')
ax[2].axis('off')
plt.show()
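Because the input is tiny, the result can be checked by hand: cross-correlating the 5x5 image with the Laplacian kernel gives the 3x3 feature map [[-2, -1, -2], [-1, 0, -1], [-2, -1, -2]] (the response is 0 only at the centre of the uniform block of ones), and 2x2 max pooling with stride 2 then keeps the single value max(-2, -1, -1, 0) = 0. A quick check against the tensors computed above:
print(conv.squeeze())    # expected: [[-2., -1., -2.], [-1., 0., -1.], [-2., -1., -2.]]
print(pooled.squeeze())  # expected: 0.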
Output:
PROGRAM 5
Write a program to perform Sentiment Analysis using RNN
import tensorflow as tf
from tensorflow.keras.datasets import imdb
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Embedding, LSTM, Dense
vocab_size = 5000
maxlen = 500
batch_size = 64
epochs = 3
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=vocab_size)
x_train = pad_sequences(x_train, maxlen=maxlen)
x_test = pad_sequences(x_test, maxlen=maxlen)
model = Sequential([
    Embedding(vocab_size, 32, input_length=maxlen),
    LSTM(100),
    Dense(1, activation='sigmoid')
])
model.compile(
loss='binary_crossentropy',
optimizer='adam',
metrics=['accuracy']
)
print(model.summary())
model.fit(
x_train, y_train,
validation_split=0.1,
batch_size=batch_size,
epochs=epochs
)
loss,acc=model.evaluate(x_test,y_test)
print(f"loss: {loss}\naccuracy: {acc}")
Output:
PROGRAM 6
Write a program to implement an LSTM-based autoencoder in TensorFlow/Keras
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import StandardScaler
from tensorflow import keras
df = pd.read_csv('spx.csv', parse_dates=['date'], index_col='date')
print("Original columns:", df.columns.tolist())
# 2. Normalize column names
df.columns = [col.strip().lower() for col in df.columns]
print("Normalized columns:", df.columns.tolist())
# 3. Ensure 'close' is numeric, drop any bad rows
df['close'] = pd.to_numeric(df['close'], errors='coerce')
n_bad = df['close'].isna().sum()
if n_bad > 0:
    print(f"Dropping {n_bad} rows where 'close' was not numeric")
df = df.dropna(subset=['close'])
# 4. Train/Test split
TRAIN_RATIO = 0.9
train_size = int(len(df) * TRAIN_RATIO)
train_df = df.iloc[:train_size].copy()
test_df = df.iloc[train_size:].copy()
# 5. Scale the 'close' prices
scaler = StandardScaler()
train_df['close'] = scaler.fit_transform(train_df[['close']])
test_df['close'] = scaler.transform(test_df[['close']])
# 6. Create sliding-window datasets
def create_dataset(series: pd.Series, time_steps: int = 30):
    X, y = [], []
    for i in range(len(series) - time_steps):
        window = series.iloc[i : i + time_steps].values.reshape(-1, 1)
        X.append(window)
        y.append(series.iloc[i + time_steps])
    return np.array(X), np.array(y)
TIME_STEPS = 30
X_train, y_train = create_dataset(train_df['close'], TIME_STEPS)
X_test, y_test = create_dataset(test_df['close'], TIME_STEPS)
print("X_train shape:", X_train.shape) # e.g. (n_samples, 30, 1)
print("y_train shape:", y_train.shape)
# 7. Build LSTM autoencoder
model = keras.Sequential([
    keras.layers.LSTM(64, input_shape=(TIME_STEPS, 1)),
    keras.layers.RepeatVector(TIME_STEPS),
    keras.layers.LSTM(64, return_sequences=True),
    keras.layers.Dropout(0.2),
    keras.layers.TimeDistributed(keras.layers.Dense(1))
])
model.compile(optimizer='adam', loss='mse')
model.summary()
# 8. Train (using X_train as both input and target for autoencoder)
history = model.fit(
X_train, X_train,
epochs=20,
batch_size=32,
validation_split=0.1,
shuffle=False
)
# 9. Plot losses
plt.figure(figsize=(8,4))
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'],label='validation loss')
plt.title('LSTM Autoencoder Loss')
plt.xlabel('Epoch')
plt.ylabel('MSE')
plt.legend()
plt.show()
X_test_pred = model.predict(X_test, verbose=0)
test_mse = np.mean((X_test_pred - X_test)**2, axis=(1,2))
print("Average test MSE:", test_mse.mean())
PROGRAM 7
Write a Program to implement image generation using GAN
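For reference, the generator G and discriminator D below are trained on the standard GAN minimax objective

\min_G \max_D \; \mathbb{E}_{x \sim p_{\text{data}}}[\log D(x)] + \mathbb{E}_{z \sim p_z}[\log(1 - D(G(z)))]

with two common practical tweaks visible in the code: one-sided label smoothing (real images are labelled 0.9 rather than 1) and the non-saturating generator update (the combined model trains G by labelling its samples as real).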
import os
import numpy as np
from keras.datasets import mnist
from keras.models import Sequential, Model
from keras.layers import (Input, InputLayer, Dense, Reshape, Flatten, LeakyReLU,
                          BatchNormalization, Dropout)
from keras.optimizers import Adam
import matplotlib.pyplot as plt
def build_generator(z_dim=100, img_shape=(28,28,1)):
    model = Sequential([
        InputLayer(input_shape=(z_dim,)),
        Dense(128), LeakyReLU(0.2), BatchNormalization(momentum=0.8),
        Dense(256), LeakyReLU(0.2), BatchNormalization(momentum=0.8),
        Dense(512), LeakyReLU(0.2), BatchNormalization(momentum=0.8),
        Dense(np.prod(img_shape), activation='tanh'),
        Reshape(img_shape)
    ])
    noise = Input(shape=(z_dim,))
    return Model(noise, model(noise))
def build_discriminator(img_shape=(28,28,1)):
    seq = Sequential([
        InputLayer(input_shape=img_shape),
        Flatten(),
        Dense(512), LeakyReLU(0.2), Dropout(0.3),
        Dense(256), LeakyReLU(0.2), Dropout(0.3),
        Dense(1, activation='sigmoid')
    ])
    img = Input(shape=img_shape)
    D = Model(img, seq(img))
    D.compile(optimizer=Adam(2e-4, 0.5), loss='binary_crossentropy',
              metrics=['accuracy'])
    return D
def sample_images(G, step, z_dim, n=25):
    os.makedirs('imgs', exist_ok=True)
    noise = np.random.normal(size=(n, z_dim))
    gen = G.predict(noise, verbose=0)
    gen = 0.5 * gen + 0.5
    fig, axs = plt.subplots(5, 5, figsize=(5, 5))
    for i, ax in enumerate(axs.flat):
        ax.imshow(gen[i, :, :, 0], cmap='gray')
        ax.axis('off')
    plt.tight_layout()
    path = f"imgs/{step}.png"
    fig.savefig(path)
    plt.show()
    plt.close()
def train(steps=2000, batch_size=64, z_dim=100):
    # Load MNIST and scale images to [-1, 1] to match the generator's tanh output
    (X, _), _ = mnist.load_data()
    X = (X.astype('float32') - 127.5) / 127.5
    X = np.expand_dims(X, -1)
    G = build_generator(z_dim)
    D = build_discriminator()
    # Freeze D inside the combined model so generator updates do not change it
    D.trainable = False
    z = Input(shape=(z_dim,))
    combined = Model(z, D(G(z)))
    combined.compile(optimizer=Adam(1e-4, 0.5), loss='binary_crossentropy')
    D.trainable = True
    real = np.ones((batch_size, 1)) * 0.9  # one-sided label smoothing
    fake = np.zeros((batch_size, 1))
    last_d_loss = last_d_acc = last_g_loss = None
    for step in range(1, steps + 1):
        # Train the discriminator on one real batch and one generated batch
        idx = np.random.randint(0, X.shape[0], batch_size)
        real_imgs = X[idx]
        noise = np.random.normal(size=(batch_size, z_dim))
        fake_imgs = G.predict(noise, verbose=0)
        d_loss_real, d_acc_real = D.train_on_batch(real_imgs, real)
        d_loss_fake, d_acc_fake = D.train_on_batch(fake_imgs, fake)
        last_d_loss = 0.5 * (d_loss_real + d_loss_fake)
        last_d_acc = 0.5 * (d_acc_real + d_acc_fake)
        # Train the generator so that D labels its samples as real
        last_g_loss = combined.train_on_batch(
            np.random.normal(size=(batch_size, z_dim)), real
        )
        if step % 1000 == 0:
            print(f"Step {step}/{steps} — D_loss: {last_d_loss:.4f}, "
                  f"D_acc: {last_d_acc*100:.2f}%, G_loss: {last_g_loss:.4f}")
            sample_images(G, step, z_dim)
    print("Training completed!")
    print(f"Final D_loss: {last_d_loss:.4f}")
    print(f"Final D_acc: {last_d_acc*100:.2f}%")
    print(f"Final G_loss: {last_g_loss:.4f}")
    return G, D
if __name__ == '__main__':
    train()
OUTPUT