Linear Regression –
import matplotlib.pyplot as plt
from scipy import stats
x = [5,7,8,7,2,17,2,9,4,11,12,9,6]
y = [99,86,87,88,111,86,103,87,94,78,77,85,86]
slope, intercept, r, p, std_err = stats.linregress(x, y)
def myfunc(x):
return slope * x + intercept
mymodel = list(map(myfunc, x))
plt.scatter(x, y)
plt.plot(x, mymodel)
plt.show()
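linregress also returns the correlation coefficient r, which gives a quick measure of how well the line fits; printing it is a small addition here for reference:
# r close to +1 or -1 indicates a strong linear relationship (added for reference)
print(f"r = {r:.3f}")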
# Install pandas first: pip install pandas
import pandas as pd
import matplotlib.pyplot as plt
# Assumes df is a pandas DataFrame (loaded earlier) with columns 'Hours Worked' and 'Weekly profit'
mean_X = df['Hours Worked'].mean()
mean_Y = df['Weekly profit'].mean()
print(f'mean_X={mean_X}')
print(f'mean_Y={mean_Y}')
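These means are the starting point of a closed-form least-squares fit; a minimal sketch of the remaining steps, still assuming the same hypothetical df as above:
# Least-squares slope and intercept from the column means (sketch; df is assumed as above)
x_col = df['Hours Worked']
y_col = df['Weekly profit']
slope = ((x_col - mean_X) * (y_col - mean_Y)).sum() / ((x_col - mean_X) ** 2).sum()
intercept = mean_Y - slope * mean_X
print(f'slope={slope}, intercept={intercept}')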
Logistic Regression-
import numpy as np
import matplotlib.pyplot as plot
from sklearn.datasets import load_digits
# Load Digits dataset
digits = load_digits()
# X is a collection of arrays of 64 (8x8) pixel intensity values for different handwritten digits
X = digits.data
# y is the corresponding value of digits
y = digits.target
def plot_digit(x):
    'Function to plot heatmaps for pixel intensity arrays'
    plot.imshow(x.reshape(8, 8))  # reshape the flat 64-value array into an 8x8 (rows, cols) grid
plot_digit(X[104])
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2,
random_state=1)
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
lrm = LogisticRegression(max_iter=5000)
lrm.fit(X_train, y_train)
y_predict = lrm.predict(X_test)
accuracy = accuracy_score(y_test, y_predict)
print(f"Accuracy of the model :{accuracy:.2f}")
SVM-
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import SVC
from sklearn.datasets import make_blobs
# Generate synthetic data
X, y = make_blobs(n_samples=100, centers=2, random_state =6, cluster_std=1.5)
# Fit the SVM model
svm = SVC(kernel='linear', C=1)
svm.fit(X, y)
# Get the separating hyperplane parameters (w·x + b = 0)
w = svm.coef_[0]
b = svm.intercept_[0]
# Calculate slope and intercept for the hyperplane
x0 = np.linspace(min(X[:, 0]), max(X[:, 0]), 100)
x1 = -(w[0] / w[1]) * x0 - b / w[1]
# Plot the data points and the hyperplane
plt.scatter(X[:, 0], X[:, 1], c=y, cmap='coolwarm', s=50)
plt.plot(x0, x1, 'k-', label='Hyperplane')
# Plot the support vectors
plt.scatter(svm.support_vectors_[:, 0],
svm.support_vectors_[:, 1],
facecolors='none',
edgecolors='k', s=100, label='Support Vectors')
plt.title("SVM Classification with Hyperplane")
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
plt.legend()
plt.grid(True)
plt.show()
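The learned weight vector also gives the margin width directly; a one-line sketch added for reference:
# For a linear SVM, the margin width equals 2 / ||w|| (added for reference)
print(f"Margin width: {2 / np.linalg.norm(w):.3f}")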
Hebbian Learning-
import numpy as np
import matplotlib.pyplot as plt
def hebbian_learning(inputs, outputs, learning_rate):
    num_inputs = inputs.shape[1]
    weights = np.zeros(num_inputs)
    for i in range(len(inputs)):
        # Update weights according to the Hebbian rule: w = w + η * x * y
        weights += learning_rate * inputs[i] * outputs[i]
    return weights

# Training data: Let's assume we have 3 input neurons (features) and binary output
inputs = np.array([
    [1, 0, 1],  # Input 1
    [1, 1, 0],  # Input 2
    [0, 1, 1],  # Input 3
])
outputs = np.array([1, 0, 1])  # Desired outputs
# Set learning rate (η)
learning_rate = 0.1
# Train the network using Hebbian learning
weights = hebbian_learning(inputs, outputs, learning_rate)
# Display the final weights
print("Final weights after Hebbian learning:", weights)
# Visualizing how the weights change after each iteration
iterations = np.arange(1, len(inputs) + 1)
weight_values = np.zeros((len(iterations), len(weights)))
for i in range(len(iterations)):
    weight_values[i] = hebbian_learning(inputs[:i+1], outputs[:i+1], learning_rate)
# Plot the weight changes over iterations
plt.plot(iterations, weight_values[:, 0], label="Weight 1")
plt.plot(iterations, weight_values[:, 1], label="Weight 2")
plt.plot(iterations, weight_values[:, 2], label="Weight 3")
plt.xlabel('Iteration')
plt.ylabel('Weight Value')
plt.title('Weight Updates in Hebbian Learning')
plt.legend()
plt.grid(True)
plt.show()
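To inspect what the learned weights do, each pattern's activation (its dot product with the weight vector) can be printed; a minimal sketch:
# Response of the trained Hebbian network to each training pattern (sketch)
activations = inputs @ weights
for pattern, act in zip(inputs, activations):
    print(f"Input {pattern} -> activation {act:.2f}")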
Expectation Maximization Algorithm-
import numpy as np
from scipy.stats import norm
def expectation_maximization(X, num_clusters=2, max_iters=100, tol=1e-4):
    np.random.seed(42)
    X = np.array(X)
    # Initialize means with the lowest and highest values in the dataset
    mu = np.array([np.min(X), np.max(X)])
    sigma = np.full(num_clusters, 1.0)  # Initialize variances
    pi = np.full(num_clusters, 1 / num_clusters)  # Equal mixing coefficients
    print(f"Initial Means: {mu}, Initial Variances: {sigma}, Initial Mixing Coefficients: {pi}\n")
    for iteration in range(max_iters):
        # Expectation Step
        responsibilities = np.zeros((len(X), num_clusters))
        for j in range(num_clusters):
            responsibilities[:, j] = pi[j] * norm.pdf(X, mu[j], np.sqrt(sigma[j]))
        responsibilities_sum = responsibilities.sum(axis=1, keepdims=True) + 1e-8
        responsibilities /= responsibilities_sum  # Normalize
        # Maximization Step
        N_k = responsibilities.sum(axis=0)
        N_k = np.maximum(N_k, 1e-8)  # Prevent zero division
        new_mu = np.sum(responsibilities * X[:, np.newaxis], axis=0) / N_k
        new_sigma = np.sum(responsibilities * (X[:, np.newaxis] - new_mu) ** 2, axis=0) / N_k
        new_pi = N_k / len(X)
        # Print iteration-wise values
        print(f"Iteration {iteration + 1}:")
        print(f"Means: {new_mu}")
        print(f"Variances: {new_sigma}")
        print(f"Mixing Coefficients: {new_pi}\n")
        # Convergence Check
        if np.allclose(mu, new_mu, atol=tol) and np.allclose(sigma, new_sigma, atol=tol):
            print("Converged!\n")
            break
        mu, sigma, pi = new_mu, new_sigma, new_pi  # Update parameters
    return mu, sigma, pi, responsibilities
# Given dataset
X = [2.0, 2.2, 1.8, 6.0, 5.8, 6.2]
# Run EM algorithm
mu_final, sigma_final, pi_final, responsibilities_final = expectation_maximization(X)
# Print final results
print(f"Final Means: {mu_final}")
print(f"Final Variances: {sigma_final}")
print(f"Final Mixing Coefficients: {pi_final}")
print("Final Responsibilities:")
print(responsibilities_final)
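A hard clustering can be read off the final responsibilities by assigning each point to the cluster with the larger value; a small sketch:
# Assign each point to the cluster with the highest responsibility (sketch)
labels = np.argmax(responsibilities_final, axis=1)
for point, label in zip(X, labels):
    print(f"{point} -> cluster {label}")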
McCulloch-Pitts model-
def mcculloch_pitts(inputs, weights, threshold):
    weighted_sum = sum(i * w for i, w in zip(inputs, weights))
    return 1 if weighted_sum >= threshold else 0
# Define gates
AND_GATE = {
    "weights": [1, 1],
    "threshold": 2
}
OR_GATE = {
    "weights": [1, 1],
    "threshold": 1
}
# Test inputs
test_cases = [(0, 0), (0, 1), (1, 0), (1, 1)]
print("AND Gate:")
for x1, x2 in test_cases:
    output = mcculloch_pitts([x1, x2], AND_GATE["weights"], AND_GATE["threshold"])
    print(f"Input: ({x1}, {x2}) -> Output: {output}")
print("\nOR Gate:")
for x1, x2 in test_cases:
    output = mcculloch_pitts([x1, x2], OR_GATE["weights"], OR_GATE["threshold"])
    print(f"Input: ({x1}, {x2}) -> Output: {output}")
Single layer perceptron –
import numpy as np
# Define the perceptron class
class Perceptron:
def __init__(self, input_size, learning_rate=0.1):
self.weights = np.zeros(input_size) # Initialize weights to zeros
self.bias = 0 # Initialize bias to zero
self.learning_rate = learning_rate # Learning rate
# Activation function (step function)
def activation(self, x):
return 1 if x >= 0 else 0
# Forward pass: calculate the weighted sum and apply the activation function
def forward(self, inputs):
weighted_sum = np.dot(inputs, self.weights) + self.bias
return self.activation(weighted_sum)
# Train the perceptron using the training data
def train(self, inputs, targets, epochs=10):
for epoch in range(epochs):
for x, target in zip(inputs, targets):
output = self.forward(x)
error = target - output # Calculate the error
# Update weights and bias based on the error
self.weights += self.learning_rate * error * x
self.bias += self.learning_rate * error
print(f"Epoch {epoch+1}/{epochs}, Weights: {self.weights}, Bias:
{self.bias}")
# Define input data for AND, OR, and NOR gates
inputs = np.array([[0, 0], [0, 1], [1, 0], [1, 1]])
# Target outputs for AND, OR, and NOR gates
and_output = np.array([0, 0, 0, 1]) # AND Gate
or_output = np.array([0, 1, 1, 1]) # OR Gate
nor_output = np.array([1, 0, 0, 0]) # NOR Gate
# Create perceptron instances for AND, OR, and NOR gates
and_perceptron = Perceptron(input_size=2)
or_perceptron = Perceptron(input_size=2)
nor_perceptron = Perceptron(input_size=2)
# Train the perceptrons
print("Training for AND Gate:")
and_perceptron.train(inputs, and_output)
print("\nTraining for OR Gate:")
or_perceptron.train(inputs, or_output)
print("\nTraining for NOR Gate:")
nor_perceptron.train(inputs, nor_output)
# Test the perceptrons on all inputs
print("\nTesting AND Gate:")
for x in inputs:
print(f"Input: {x}, Output: {and_perceptron.forward(x)}")
print("\nTesting OR Gate:")
for x in inputs:
print(f"Input: {x}, Output: {or_perceptron.forward(x)}")
print("\nTesting NOR Gate:")
for x in inputs:
print(f"Input: {x}, Output: {nor_perceptron.forward(x)}")
Backpropagation-
import numpy as np
# Sigmoid activation function and its derivative
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def sigmoid_derivative(x):
return x * (1 - x)
# Training Data for XOR
X = np.array([[0, 0],
[0, 1],
[1, 0],
[1, 1]])
# Expected XOR output
y = np.array([[0], [1], [1], [0]])
# Initialize weights and biases randomly
np.random.seed(1)
input_size = 2
hidden_size = 3
output_size = 1
W1 = np.random.uniform(-1, 1, (input_size, hidden_size))  # Weights for input -> hidden
b1 = np.random.uniform(-1, 1, (1, hidden_size)) # Bias for hidden layer
W2 = np.random.uniform(-1, 1, (hidden_size, output_size))  # Weights for hidden -> output
b2 = np.random.uniform(-1, 1, (1, output_size)) # Bias for output layer
# Training parameters
epochs = 10000
learning_rate = 0.1
# Training loop
for epoch in range(epochs):
# ---- Forward Pass ----
hidden_input = np.dot(X, W1) + b1 # Net input to hidden layer
hidden_output = sigmoid(hidden_input) # Activation of hidden layer
final_input = np.dot(hidden_output, W2) + b2  # Net input to output layer
predicted_output = sigmoid(final_input) # Activation of output layer
# ---- Compute Error ----
error = (y - predicted_output)
# ---- Backpropagation ----
# Compute gradients for output layer
error2 = error * sigmoid_derivative(predicted_output)
# Compute gradients for hidden layer
error1 = np.dot(error2, W2.T) * sigmoid_derivative(hidden_output)
# ---- Update Weights and Biases ----
W2 += np.dot(hidden_output.T, error2) * learning_rate
b2 += np.sum(error2, axis=0, keepdims=True) * learning_rate
W1 += np.dot(X.T, error1) * learning_rate
b1 += np.sum(error1, axis=0, keepdims=True) * learning_rate
# Print loss every 1000 epochs
if epoch % 1000 == 0:
loss = np.mean(np.abs(error))
print(f"Epoch {epoch}: Loss = {loss:.4f}")
# ---- Testing the trained network ----
print("\nTrained Network Output:")
print(predicted_output.round()) # Rounded output for XOR operation
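The trained weights can be reused on new inputs by repeating the forward pass; a minimal helper sketch:
# Forward pass with the trained weights (sketch)
def predict(x):
    hidden = sigmoid(np.dot(x, W1) + b1)
    return sigmoid(np.dot(hidden, W2) + b2).round()

print(predict(np.array([[0, 1]])))  # XOR of 0 and 1 should be 1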
PCA-
import numpy as np
import matplotlib.pyplot as plt
# Sample dataset (2D)
X=np.array([[2.5,2.4],
[0.5,0.7],
[2.2,2.9],
[1.9,2.2],
[3.1,3.0],
[2.3,2.7],
[2.0,1.6],
[1.0,1.1],
[1.5,1.6],
[1.1,0.9]])
# Step 1: Mean Centering
mean_X = np.mean(X, axis=0)
X_centered = X - mean_X
# Step 2: Compute Covariance Matrix
cov_matrix = np.cov(X_centered.T)
# Step 3: Compute Eigenvalues and Eigenvectors
eigenvalues, eigenvectors = np.linalg.eig(cov_matrix)
# Step 4: Sort Eigenvalues and Select Principal Component
sorted_indices = np.argsort(eigenvalues)[::-1]  # Sort in descending order
principal_component = eigenvectors[:, sorted_indices[0]]  # First eigenvector
# Step 5: Project Data onto Principal Component
X_pca = X_centered @ principal_component # 1D Projection
# Step 6: Reconstruct 2D points from 1D projection for visualization
X_reconstructed = np.outer(X_pca, principal_component) + mean_X
#---- Plot the Original Data, Principal Component, and Projected Data----
plt.figure(figsize=(8, 6))
# Scatter original data points
plt.scatter(X[:, 0], X[:, 1], color='blue', label='Original Data')
# Draw principal component line
pc_line = np.array([mean_X - 3 * principal_component, mean_X + 3 * principal_component])
plt.plot(pc_line[:, 0], pc_line[:, 1], 'k--', label='Principal Component')
# Scatter projected points (onto principal component)
plt.scatter(X_reconstructed[:, 0], X_reconstructed[:, 1], color='red',
label='Projected Data')
# Connect original points to their projections
for i in range(len(X)):
    plt.plot([X[i, 0], X_reconstructed[i, 0]], [X[i, 1], X_reconstructed[i, 1]],
             color='gray', linestyle='dotted')
plt.xlabel("Feature 1")
plt.ylabel("Feature 2")
plt.title("PCA Projection Visualization")
plt.legend()
plt.grid()
plt.show()
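The sorted eigenvalues also show how much of the total variance the first component captures; a small sketch added for reference:
# Explained variance ratio of the first principal component (sketch)
explained_ratio = eigenvalues[sorted_indices[0]] / eigenvalues.sum()
print(f"Variance explained by PC1: {explained_ratio:.2%}")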