Regularization Techniques in DL


Rushikesh Jadhav

232010013

DL Lab 5

Aim: Implementation of Regularization Techniques

import tensorflow as tf
from tensorflow.keras import layers, models
from sklearn.model_selection import train_test_split
from sklearn.datasets import fetch_california_housing

# Load the California housing dataset and split it into train/test sets
data = fetch_california_housing()
X, y = data.data, data.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
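
An optional preprocessing step, not used in the runs below: the raw California-housing features have very different scales, which is why the first epochs show very large losses. A minimal sketch of standardizing the inputs with scikit-learn (the variable names X_train_scaled and X_test_scaled are illustrative):

from sklearn.preprocessing import StandardScaler

scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train)  # fit the scaler on the training split only
X_test_scaled = scaler.transform(X_test)        # reuse the same statistics for the test split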

1. Dropout



# Build model with Dropout
model_dropout = models.Sequential([
    layers.Dense(64, activation='relu', input_shape=(X_train.shape[1],)),
    layers.Dropout(0.5),
    layers.Dense(64, activation='relu'),
    layers.Dense(1)
])

model_dropout.compile(optimizer='adam', loss='mean_squared_error')
model_dropout.fit(X_train, y_train, epochs=10, validation_split=0.2)

Epoch 1/10
/usr/local/lib/python3.10/dist-packages/keras/src/layers/core/dense.py:87: UserWa
super().__init__(activity_regularizer=activity_regularizer, **kwargs)
413/413 ━━━━━━━━━━━━━━━━━━━━ 2s 2ms/step - loss: 15149.9199 - val_loss: 22.0540
Epoch 2/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 1051.6801 - val_loss: 42.0648
Epoch 3/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 574.1198 - val_loss: 13.8391
Epoch 4/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 236.6241 - val_loss: 43.7359
Epoch 5/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 233.4340 - val_loss: 3.3071
Epoch 6/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 2s 4ms/step - loss: 91.3531 - val_loss: 39.9956
Epoch 7/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 4s 7ms/step - loss: 132.5156 - val_loss: 7.4772
Epoch 8/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 2s 5ms/step - loss: 47.8788 - val_loss: 8.1829
Epoch 9/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 48.0613 - val_loss: 15.4679
Epoch 10/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 34.5671 - val_loss: 2.9151
<keras.src.callbacks.history.History at 0x7bcedf055540>
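
Dropout randomly zeroes a fraction (here 50%) of the first hidden layer's activations during training and is disabled at inference. A minimal sketch of verifying this on a few test rows (illustrative only; not part of the lab output above):

import numpy as np

sample = X_test[:5].astype("float32")
pred_training_mode = model_dropout(sample, training=True)    # random dropout mask applied
pred_inference_mode = model_dropout(sample, training=False)  # dropout disabled
print(np.allclose(pred_training_mode, pred_inference_mode))  # usually False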

2. Early Stopping

# Early stopping callback: stop when val_loss has not improved for 3 epochs
# and restore the best weights seen so far
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=3,
                                                  restore_best_weights=True)

# Train the model with early stopping
model_dropout.fit(X_train, y_train, epochs=10, validation_split=0.2,
                  callbacks=[early_stopping])

Epoch 1/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 22.5465 - val_loss: 3.3754
Epoch 2/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 10.0820 - val_loss: 4.1327
Epoch 3/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 6.0967 - val_loss: 2.1741
Epoch 4/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 4.9395 - val_loss: 2.2975
Epoch 5/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 3.6868 - val_loss: 1.5492
Epoch 6/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 3.2472 - val_loss: 1.4901
Epoch 7/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 2.9435 - val_loss: 1.5168
Epoch 8/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 2.2726 - val_loss: 1.3492
Epoch 9/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 2.6524 - val_loss: 1.3322
Epoch 10/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 2.0697 - val_loss: 1.1929
<keras.src.callbacks.history.History at 0x7bcede6444f0>
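
With patience=3 and restore_best_weights=True, training stops once val_loss fails to improve for three consecutive epochs and the weights from the best epoch are kept. In the 10-epoch run above the callback never triggers; a sketch of a longer run where it can, assuming the fit result is captured in a history variable (not done in the original cell):

history = model_dropout.fit(X_train, y_train, epochs=50,
                            validation_split=0.2, callbacks=[early_stopping])
val_losses = history.history['val_loss']
best_epoch = val_losses.index(min(val_losses)) + 1
print("Stopped after", len(val_losses), "epochs; best epoch:", best_epoch)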

3. L1 Regularization

# Build model with L1 regularization
model_l1 = models.Sequential([
    layers.Dense(64, activation='relu', kernel_regularizer=tf.keras.regularizers.l1(0.01)),
    layers.Dense(64, activation='relu', kernel_regularizer=tf.keras.regularizers.l1(0.01)),
    layers.Dense(1)
])

model_l1.compile(optimizer='adam', loss='mean_squared_error')
model_l1.fit(X_train, y_train, epochs=10, validation_split=0.2)

Epoch 1/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 2s 2ms/step - loss: 298.8849 - val_loss: 1.6367
Epoch 2/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 1.6259 - val_loss: 1.3696
Epoch 3/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 10.6650 - val_loss: 2.8881

Epoch 4/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 2.9807 - val_loss: 1.6852
Epoch 5/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 5.3804 - val_loss: 1.1550
Epoch 6/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 1.3497 - val_loss: 1.3505
Epoch 7/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 20.1218 - val_loss: 5.1635
Epoch 8/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 1.6839 - val_loss: 1.3582
Epoch 9/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 1.1762 - val_loss: 1.2227
Epoch 10/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 1.0459 - val_loss: 1.3154
<keras.src.callbacks.history.History at 0x7bcede6dc2e0>
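
The L1 penalty pushes many weights toward exactly zero, so sparsity of the learned kernels is a rough check that the regularizer is doing its job. A minimal sketch, assuming the section's model is named model_l1 as above:

import numpy as np

w = model_l1.layers[0].get_weights()[0]      # kernel of the first Dense layer
sparsity = np.mean(np.abs(w) < 1e-3)         # fraction of near-zero weights
print(f"Fraction of near-zero weights: {sparsity:.2%}")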

4. L2 Regularization

# Build model with L2 regularization
model_l2 = models.Sequential([
    layers.Dense(64, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.01)),
    layers.Dense(64, activation='relu', kernel_regularizer=tf.keras.regularizers.l2(0.01)),
    layers.Dense(1)
])

model_l2.compile(optimizer='adam', loss='mean_squared_error')
model_l2.fit(X_train, y_train, epochs=10, validation_split=0.2)

Epoch 1/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 2s 2ms/step - loss: 221.3601 - val_loss: 2.4019
Epoch 2/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 144.9611 - val_loss: 1.4573
Epoch 3/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 1.9867 - val_loss: 1.2558
Epoch 4/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 1.5741 - val_loss: 10.6253
Epoch 5/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 1.8362 - val_loss: 98.7645
Epoch 6/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 156.8823 - val_loss: 1.2026
Epoch 7/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 3.0938 - val_loss: 1.1754
Epoch 8/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 3.0639 - val_loss: 1.9080
Epoch 9/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 1.5629 - val_loss: 2.1380
Epoch 10/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 15.6031 - val_loss: 7.3652
<keras.src.callbacks.history.History at 0x7bcee04c3880>
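
The L2 penalty adds lambda * sum(w^2) for each regularized kernel to the mean squared error, which is why the reported training loss can sit above the plain MSE. A sketch of computing the current penalty term, assuming the 0.01 factor used above:

import tensorflow as tf

l2_penalty = sum(
    0.01 * tf.reduce_sum(tf.square(layer.kernel))
    for layer in model_l2.layers[:2]          # the two regularized Dense layers
)
print("Current L2 penalty term:", float(l2_penalty))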

5. Elastic Net

# Build model with Elastic Net (combined L1 and L2 regularization)
model_elastic_net = models.Sequential([
    layers.Dense(64, activation='relu', kernel_regularizer=tf.keras.regularizers.l1_l2(l1=0.01, l2=0.01)),
    layers.Dense(64, activation='relu', kernel_regularizer=tf.keras.regularizers.l1_l2(l1=0.01, l2=0.01)),
    layers.Dense(1)
])

model_elastic_net.compile(optimizer='adam', loss='mean_squared_error')
model_elastic_net.fit(X_train, y_train, epochs=10, validation_split=0.2)

Epoch 1/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 2s 2ms/step - loss: 4133.4990 - val_loss: 5.0145
Epoch 2/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 5.2582 - val_loss: 5.3149
Epoch 3/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 2s 3ms/step - loss: 4.3300 - val_loss: 3.9747
Epoch 4/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 3ms/step - loss: 7.3997 - val_loss: 68.9548
Epoch 5/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 2s 2ms/step - loss: 11.6055 - val_loss: 3.8571
Epoch 6/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 101.9356 - val_loss: 5.4602
Epoch 7/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 10.6571 - val_loss: 3.8880
Epoch 8/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 5.3400 - val_loss: 3.7929
Epoch 9/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 16.1976 - val_loss: 3.6659
Epoch 10/10
413/413 ━━━━━━━━━━━━━━━━━━━━ 1s 2ms/step - loss: 5.7418 - val_loss: 3.6570
<keras.src.callbacks.history.History at 0x7bced6253910>
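
The test split created at the start was never used above; a natural closing step is to compare the four regularized models on it (sketch only; evaluate returns the compiled loss, i.e. MSE plus any regularization penalty):

for name, m in [("dropout", model_dropout), ("l1", model_l1),
                ("l2", model_l2), ("elastic_net", model_elastic_net)]:
    test_loss = m.evaluate(X_test, y_test, verbose=0)
    print(f"{name:12s} test loss: {test_loss:.4f}")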
