ASSIGNMENT-4
IMAGE DATA TYPE:
1. DEEP LEARNING MODEL:
CNN:
PROGRAM:
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, MaxPooling2D, Flatten, Dense, Dropout
from google.colab import drive
# Mount Google Drive
drive.mount('/content/drive')
# Directories
train_dir = '/content/drive/MyDrive/DATASET/TRAIN'
val_dir = '/content/drive/MyDrive/DATASET/TEST'
# Data augmentation and preparation
train_datagen = ImageDataGenerator(rescale=1./255, rotation_range=20, zoom_range=0.15,
                                   horizontal_flip=True)
val_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150),
                                                    batch_size=32, class_mode='categorical')
val_generator = val_datagen.flow_from_directory(val_dir, target_size=(150, 150),
                                                batch_size=32, class_mode='categorical')
# CNN Model
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)),
    MaxPooling2D(2, 2),
    Conv2D(64, (3, 3), activation='relu'),
    MaxPooling2D(2, 2),
    Conv2D(128, (3, 3), activation='relu'),
    MaxPooling2D(2, 2),
    Flatten(),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(len(train_generator.class_indices), activation='softmax')
])
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Train the model
history = model.fit(train_generator, epochs=10, validation_data=val_generator)
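Once training finishes, it is worth evaluating the network on the held-out set and saving the weights. A minimal sketch, assuming the generators defined above (the save path is a hypothetical placeholder, not part of the original program):
# Evaluate the trained CNN on the validation generator
val_loss, val_acc = model.evaluate(val_generator)
print(f"Validation accuracy: {val_acc:.2f}")
# Save the model for later reuse (hypothetical file name)
model.save('/content/drive/MyDrive/cnn_model.keras')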
OUTPUT:
DCNN:
PROGRAM:
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import (Conv2D, MaxPooling2D, Flatten, Dense, Dropout,
                                     BatchNormalization)
import matplotlib.pyplot as plt
from google.colab import drive
# Mount Google Drive
drive.mount('/content/drive')
# Directories
train_dir = '/content/drive/MyDrive/New folder (2)/train'
val_dir = '/content/drive/MyDrive/New folder (2)/test'
# Data augmentation and preparation
train_datagen = ImageDataGenerator(rescale=1./255, rotation_range=20, zoom_range=0.15,
                                   horizontal_flip=True)
val_datagen = ImageDataGenerator(rescale=1./255)
train_generator = train_datagen.flow_from_directory(train_dir, target_size=(150, 150),
                                                    batch_size=32, class_mode='categorical')
val_generator = val_datagen.flow_from_directory(val_dir, target_size=(150, 150),
                                                batch_size=32, class_mode='categorical')
# Deep Convolutional Neural Network (DCNN) Model
model = Sequential([
    Conv2D(32, (3, 3), activation='relu', input_shape=(150, 150, 3)),
    BatchNormalization(),
    MaxPooling2D(2, 2),
    Conv2D(64, (3, 3), activation='relu'),
    BatchNormalization(),
    MaxPooling2D(2, 2),
    Conv2D(128, (3, 3), activation='relu'),
    BatchNormalization(),
    MaxPooling2D(2, 2),
    Conv2D(256, (3, 3), activation='relu'),
    BatchNormalization(),
    MaxPooling2D(2, 2),
    Conv2D(512, (3, 3), activation='relu'),
    BatchNormalization(),
    MaxPooling2D(2, 2),
    Flatten(),
    Dense(512, activation='relu'),
    Dropout(0.5),
    Dense(len(train_generator.class_indices), activation='softmax')
])
# Compile the model
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# Model summary (optional, to see the structure)
model.summary()
# Train the model
history = model.fit(train_generator, epochs=10, validation_data=val_generator)
# Plot the training history
plt.plot(history.history['accuracy'], label='Training Accuracy')
plt.plot(history.history['val_accuracy'], label='Validation Accuracy')
plt.title('Learning Curve for DCNN Model')
plt.xlabel('Epoch')
plt.ylabel('Accuracy')
plt.legend(loc='best')
plt.grid(True)
plt.show()
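To try the trained DCNN on a single image, the input must be resized and rescaled exactly as during training. A minimal sketch, where '/content/sample.jpg' is a hypothetical placeholder path:
import numpy as np
from tensorflow.keras.preprocessing import image
img = image.load_img('/content/sample.jpg', target_size=(150, 150))  # hypothetical path
x = np.expand_dims(image.img_to_array(img) / 255.0, axis=0)  # rescale and add batch dimension
probs = model.predict(x)[0]
class_names = list(train_generator.class_indices.keys())
print("Predicted class:", class_names[np.argmax(probs)])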
OUTPUT:
2. MACHINE LEARNING MODEL:
RANDOM FOREST:
PROGRAM:
import numpy as np
import os
from sklearn.model_selection import train_test_split, learning_curve
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, accuracy_score
from PIL import Image
from google.colab import drive
import matplotlib.pyplot as plt
# Mount Google Drive
drive.mount('/content/drive')
# Function to load images from a directory and preprocess them
def load_images_from_folder(folder_path, image_size=(64, 64)):
    images = []
    labels = []
    for label in os.listdir(folder_path):
        label_folder = os.path.join(folder_path, label)
        if os.path.isdir(label_folder):
            for filename in os.listdir(label_folder):
                img_path = os.path.join(label_folder, filename)
                img = Image.open(img_path).convert('L')  # Convert to grayscale
                img = img.resize(image_size)             # Resize image
                img_array = np.array(img).flatten()      # Flatten the image
                images.append(img_array)
                labels.append(label)
    return np.array(images), np.array(labels)
# Load image dataset
folder_path = '/content/drive/MyDrive/DATASET' # Change to your dataset path
X, y = load_images_from_folder(folder_path)
# Split the dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Initialize and train the Random Forest model
rf_model = RandomForestClassifier(n_estimators=100, random_state=42)
rf_model.fit(X_train, y_train)
# Make predictions on the test set
y_pred = rf_model.predict(X_test)
# Evaluate the model
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy:.2f}")
print("Classification Report:")
print(classification_report(y_test, y_pred))
# Compute and plot the learning curve
train_sizes, train_scores, val_scores = learning_curve(
    rf_model, X_train, y_train, cv=5,
    train_sizes=np.linspace(0.1, 1.0, 10), scoring='accuracy', n_jobs=-1)
plt.plot(train_sizes, np.mean(train_scores, axis=1), 'o-', label='Training Accuracy')
plt.plot(train_sizes, np.mean(val_scores, axis=1), 's-', label='Validation Accuracy')
plt.title('Learning Curve for Random Forest Model')
plt.xlabel('Training Set Size')
plt.ylabel('Accuracy')
plt.legend(loc='best')
plt.grid(True)
plt.show()
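A confusion matrix makes the per-class errors easier to read than the classification report alone. A minimal sketch using scikit-learn's built-in display, assuming the variables defined above:
from sklearn.metrics import confusion_matrix, ConfusionMatrixDisplay
cm = confusion_matrix(y_test, y_pred, labels=rf_model.classes_)
ConfusionMatrixDisplay(confusion_matrix=cm, display_labels=rf_model.classes_).plot()
plt.title('Confusion Matrix for Random Forest Model')
plt.show()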
OUTPUT:
SVM:
PROGRAM:
import numpy as np
import os
from sklearn.model_selection import train_test_split, learning_curve
from sklearn.svm import SVC
from sklearn.metrics import classification_report, accuracy_score
from PIL import Image
from google.colab import drive
import matplotlib.pyplot as plt
# Mount Google Drive
drive.mount('/content/drive')
# Function to load images from a directory and preprocess them
def load_images_from_folder(folder_path, image_size=(64, 64)):
    images = []
    labels = []
    for label in os.listdir(folder_path):
        label_folder = os.path.join(folder_path, label)
        if os.path.isdir(label_folder):
            for filename in os.listdir(label_folder):
                img_path = os.path.join(label_folder, filename)
                img = Image.open(img_path).convert('L')  # Convert to grayscale
                img = img.resize(image_size)             # Resize image
                img_array = np.array(img).flatten()      # Flatten the image
                images.append(img_array)
                labels.append(label)
    return np.array(images), np.array(labels)
# Load image dataset
folder_path = '/content/drive/MyDrive/DATASET' # Change to your dataset path
X, y = load_images_from_folder(folder_path)
# Split the dataset into training and test sets
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
# Initialize and train the SVM model
svm_model = SVC(kernel='rbf', random_state=42)
svm_model.fit(X_train, y_train)
# Make predictions on the test set
y_pred = svm_model.predict(X_test)
# Evaluate the model
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy:.2f}")
print("Classification Report:")
print(classification_report(y_test, y_pred))
# Compute the learning curve
train_sizes, train_scores, val_scores = learning_curve(
    svm_model, X_train, y_train, cv=5,
    train_sizes=np.linspace(0.1, 1.0, 10), scoring='accuracy', n_jobs=-1)
# Compute mean and standard deviation for training and validation scores
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
val_scores_mean = np.mean(val_scores, axis=1)
val_scores_std = np.std(val_scores, axis=1)
# Plotting the learning curve
plt.figure(figsize=(10, 6))
plt.plot(train_sizes, train_scores_mean, 'o-', label='Training Accuracy')
plt.plot(train_sizes, val_scores_mean, 's-', label='Validation Accuracy')
# Fill between the curves for standard deviation
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
                 train_scores_mean + train_scores_std, alpha=0.1, color="r")
plt.fill_between(train_sizes, val_scores_mean - val_scores_std,
                 val_scores_mean + val_scores_std, alpha=0.1, color="g")
plt.title('Learning Curve for SVM Model')
plt.xlabel('Training Set Size')
plt.ylabel('Accuracy')
plt.legend(loc='best')
plt.grid(True)
plt.show()
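To classify a new image with the trained SVM, it must go through the same grayscale/resize/flatten preprocessing as the training data. A minimal sketch, where '/content/new_image.jpg' is a hypothetical placeholder path:
new_img = Image.open('/content/new_image.jpg').convert('L')  # hypothetical path
new_img = new_img.resize((64, 64))  # same size used during training
sample = np.array(new_img).flatten().reshape(1, -1)  # flatten into a single feature row
print("Predicted label:", svm_model.predict(sample)[0])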
OUTPUT: