1. Sorting Algorithms
a) Merge Sort
Explanation:
Merge Sort is a divide-and-conquer algorithm that splits an array into halves, recursively
sorts each half, and merges the sorted halves to produce the final sorted array.
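For example, [12, 11, 13, 5, 6, 7] is split into [12, 11, 13] and [5, 6, 7]; each half is sorted recursively to [11, 12, 13] and [5, 6, 7], and merging the two sorted halves produces [5, 6, 7, 11, 12, 13].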
Code:
#include <iostream>
#include <vector>
using namespace std;
void merge(int arr[], int left, int mid, int right) {
int n1 = mid - left + 1;
int n2 = right - mid;
vector<int> L(n1), R(n2); // temporary buffers for the two halves
for (int i = 0; i < n1; i++) L[i] = arr[left + i];
for (int j = 0; j < n2; j++) R[j] = arr[mid + 1 + j];
int i = 0, j = 0, k = left;
while (i < n1 && j < n2) {
if (L[i] <= R[j]) arr[k++] = L[i++];
else arr[k++] = R[j++];
}
while (i < n1) arr[k++] = L[i++];
while (j < n2) arr[k++] = R[j++];
}
void mergeSort(int arr[], int left, int right) {
if (left < right) {
int mid = left + (right - left) / 2;
mergeSort(arr, left, mid);
mergeSort(arr, mid + 1, right);
merge(arr, left, mid, right);
}
}
int main() {
int arr[] = {12, 11, 13, 5, 6, 7};
int arr_size = sizeof(arr) / sizeof(arr[0]);
mergeSort(arr, 0, arr_size - 1);
for (int i = 0; i < arr_size; i++) cout << arr[i] << " ";
return 0;
}
Time Complexity Analysis:
• Best, Worst, Average Case: O(n log n)
b) Quick Sort
Explanation:
Quick Sort is a divide-and-conquer algorithm that selects a pivot, partitions the array around
the pivot, and recursively sorts the partitions.
Code:
#include <iostream>
using namespace std;
int partition(int arr[], int low, int high) {
int pivot = arr[high];
int i = low - 1;
for (int j = low; j < high; j++) {
if (arr[j] < pivot) {
i++;
swap(arr[i], arr[j]);
}
}
swap(arr[i + 1], arr[high]);
return i + 1;
}
void quickSort(int arr[], int low, int high) {
if (low < high) {
int pi = partition(arr, low, high);
quickSort(arr, low, pi - 1);
quickSort(arr, pi + 1, high);
}
}
int main() {
int arr[] = {10, 7, 8, 9, 1, 5};
int n = sizeof(arr) / sizeof(arr[0]);
quickSort(arr, 0, n - 1);
for (int i = 0; i < n; i++) cout << arr[i] << " ";
return 0;
}
Time Complexity Analysis:
• Best and Average Case: O(n log n)
• Worst Case: O(n^2), which occurs when the chosen pivot is repeatedly the smallest or largest element (e.g. an already sorted array with the last element as pivot); a randomized-pivot sketch follows below.
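A common way to make the O(n^2) worst case unlikely in practice is to choose the pivot at random before partitioning. A minimal sketch (the name randomizedPartition is illustrative; it assumes <cstdlib> is included and reuses the partition() shown above):
int randomizedPartition(int arr[], int low, int high) {
    int r = low + rand() % (high - low + 1); // pick a random index in [low, high]
    swap(arr[r], arr[high]);                 // move the random element into the pivot slot
    return partition(arr, low, high);        // then partition exactly as before
}
quickSort would then call randomizedPartition in place of partition.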
c) Bubble Sort
Explanation:
Bubble Sort repeatedly steps through the list, compares adjacent elements, and swaps them if
they are in the wrong order.
Code:
#include <iostream>
using namespace std;
void bubbleSort(int arr[], int n) {
for (int i = 0; i < n - 1; i++) {
for (int j = 0; j < n - i - 1; j++) {
if (arr[j] > arr[j + 1]) {
swap(arr[j], arr[j + 1]);
}
}
}
}
int main() {
int arr[] = {64, 34, 25, 12, 22, 11, 90};
int n = sizeof(arr) / sizeof(arr[0]);
bubbleSort(arr, n);
for (int i = 0; i < n; i++) cout << arr[i] << " ";
return 0;
}
Time Complexity Analysis:
• Best Case: O(n) when the array is already sorted and an early-exit flag detects that a pass made no swaps (the version above always performs O(n^2) comparisons); see the sketch after this list.
• Worst and Average Case: O(n^2)
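A minimal sketch of the early-exit variant mentioned above (a drop-in replacement for bubbleSort in the program above; the name bubbleSortEarlyExit is illustrative):
void bubbleSortEarlyExit(int arr[], int n) {
    for (int i = 0; i < n - 1; i++) {
        bool swapped = false;                 // track whether this pass changed anything
        for (int j = 0; j < n - i - 1; j++) {
            if (arr[j] > arr[j + 1]) {
                swap(arr[j], arr[j + 1]);
                swapped = true;
            }
        }
        if (!swapped) break;                  // already sorted: stop after a single O(n) pass
    }
}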
OUTPUTS
Merge Sort
______________
Input: [12, 11, 13, 5, 6, 7]
Output: [ 5, 6, 7, 11, 12, 13]
Quick Sort
______________
Input: [10, 7, 8, 9, 1, 5]
Output: [ 1, 5, 7, 8, 9, 10]
Bubble Sort
______________
Input: [64, 34, 25, 12, 22, 11, 90]
Output: [11, 12, 22, 25, 34, 64, 90]
2. Search Algorithms
a) Linear Search
Explanation:
Linear Search iteratively checks each element in the array to find the target.
Code:
#include <iostream>
using namespace std;
int linearSearch(int arr[], int n, int x) {
for (int i = 0; i < n; i++) {
if (arr[i] == x) return i;
}
return -1;
}
int main() {
int arr[] = {2, 3, 4, 10, 40};
int x = 10;
int result = linearSearch(arr, 5, x);
cout << "Element found at index " << result;
return 0;
}
Time Complexity Analysis:
• Best Case: O(1)
• Worst Case: O(n)
b) Binary Search
Explanation:
Binary Search repeatedly halves the search interval of a sorted array by comparing the target with the middle element, locating it in logarithmic time.
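For example, searching for 10 in [2, 3, 4, 10, 40]: the middle element 4 is smaller than 10, so the search continues in [10, 40]; the next middle element is 10, so the target is found at index 3 after two comparisons.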
Code:
#include <iostream>
using namespace std;
int binarySearch(int arr[], int l, int r, int x) {
while (l <= r) {
int m = l + (r - l) / 2;
if (arr[m] == x) return m;
if (arr[m] < x) l = m + 1;
else r = m - 1;
}
return -1;
}
int main() {
int arr[] = {2, 3, 4, 10, 40};
int x = 10;
int n = sizeof(arr) / sizeof(arr[0]);
int result = binarySearch(arr, 0, n - 1, x);
cout << "Element found at index " << result;
return 0;
}
Time Complexity Analysis:
• Best Case: O(1)
• Worst and Average Case: O(log n)
OUTPUT
Linear Search
______________
Input: Array: [2, 3, 4, 10, 40], Target: 10
Output: Element found at index 3
Binary Search
______________
Input: Sorted Array: [2, 3, 4, 10, 40], Target: 10
Output: Element found at index 3
3. Huffman Coding
Explanation:
Huffman Coding is a greedy algorithm used for data compression, where frequently
occurring characters are represented with shorter codes.
Code:
#include <iostream>
#include <queue>
#include <vector>
#include <string>
using namespace std;
struct Node {
char ch;
int freq;
Node* left;
Node* right;
Node(char character, int frequency) {
ch = character;
freq = frequency;
left = right = nullptr;
}
};
struct Compare {
bool operator()(Node* l, Node* r) {
return l->freq > r->freq;
}
};
void printCodes(Node* root, string str) {
if (!root) return;
if (root->ch != '$') cout << root->ch << ": " << str << "\n";
printCodes(root->left, str + "0");
printCodes(root->right, str + "1");
}
void HuffmanCodes(char arr[], int freq[], int size) {
priority_queue<Node*, vector<Node*>, Compare> minHeap;
for (int i = 0; i < size; i++) minHeap.push(new Node(arr[i], freq[i]));
while (minHeap.size() != 1) {
Node* left = minHeap.top(); minHeap.pop();
Node* right = minHeap.top(); minHeap.pop();
Node* top = new Node('$', left->freq + right->freq);
top->left = left;
top->right = right;
minHeap.push(top);
}
printCodes(minHeap.top(), "");
}
int main() {
char arr[] = {'a', 'b', 'c', 'd', 'e', 'f'};
int freq[] = {5, 9, 12, 13, 16, 45};
int size = sizeof(arr) / sizeof(arr[0]);
HuffmanCodes(arr, freq, size);
return 0;
}
Time Complexity Analysis:
• Complexity: O(n log n), where n is the number of distinct characters, due to the priority-queue operations used while merging nodes.
OUTPUT
Huffman Coding
______________
Input: Characters: [a, b, c, d, e, f], Frequencies: [5, 9, 12, 13, 16, 45]
Output:
f: 0
c: 100
d: 101
a: 1100
b: 1101
e: 111
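With these codes the 100 input symbols need 5*4 + 9*4 + 12*3 + 13*3 + 16*3 + 45*1 = 224 bits, versus 300 bits with a fixed 3-bit code for 6 characters: the most frequent character f gets the shortest code, and the total length shrinks by roughly 25%.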
4. Minimum Spanning Tree
Kruskal’s Algorithm
Explanation:
Kruskal's algorithm finds a Minimum Spanning Tree (MST) by selecting edges in ascending
order of weight, ensuring that no cycles are formed.
Code:
#include <iostream>
#include <vector>
#include <algorithm>
using namespace std;
struct Edge {
int src, dest, weight;
};
struct Graph {
int V, E;
vector<Edge> edges;
};
struct DisjointSets {
int *parent, *rank;
DisjointSets(int n) {
parent = new int[n+1];
rank = new int[n+1];
for (int i = 0; i <= n; i++) {
parent[i] = i;
rank[i] = 0;
}
}
int find(int u) {
if (u != parent[u])
parent[u] = find(parent[u]);
return parent[u];
}
void merge(int u, int v) {
u = find(u), v = find(v);
if (rank[u] > rank[v]) parent[v] = u;
else parent[u] = v;
if (rank[u] == rank[v]) rank[v]++;
}
};
bool compare(Edge e1, Edge e2) {
return e1.weight < e2.weight;
}
void KruskalMST(Graph& graph) {
sort(graph.edges.begin(), graph.edges.end(), compare);
DisjointSets ds(graph.V);
vector<Edge> mst;
for (Edge& edge : graph.edges) {
int uRep = ds.find(edge.src);
int vRep = ds.find(edge.dest);
if (uRep != vRep) {
mst.push_back(edge);
ds.merge(uRep, vRep);
}
}
cout << "Edges in the MST:\n";
for (Edge& edge : mst) {
cout << edge.src << " - " << edge.dest << ": " << edge.weight << endl;
}
}
int main() {
Graph graph = {4, 5, {{0, 1, 10}, {0, 2, 6}, {0, 3, 5}, {1, 3, 15}, {2, 3, 4}}};
KruskalMST(graph);
return 0;
}
Time Complexity Analysis:
• Complexity: O(E log E), where E is the number of edges (due to sorting).
OUTPUT
Kruskal's Algorithm
______________
Input: Vertices: 4, Edges: [(0,1,10), (0,2,6), (0,3,5), (1,3,15), (2,3,4)]
Output:
Edges in the MST:
2 - 3: 4
0 - 3: 5
0 - 1: 10
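The total weight of this MST is 4 + 5 + 10 = 19, and it contains exactly V - 1 = 3 edges for the 4 vertices.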
5. Dijkstra's Algorithm
Explanation:
Dijkstra’s algorithm finds the shortest path from a source node to all other nodes in a
weighted graph with non-negative weights using a priority queue.
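For the graph used below, starting from vertex 0: vertices 1 and 2 are first settled with distances 2 and 4; relaxing 1 -> 3 gives a tentative distance of 9, which is then improved to 7 via 2 -> 3; finally 3 -> 4 yields distance 8 for vertex 4.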
Code:
#include <iostream>
#include <vector>
#include <queue>
#include <climits>
using namespace std;
void dijkstra(vector<vector<pair<int, int>>>& graph, int src) {
int V = graph.size();
vector<int> dist(V, INT_MAX);
priority_queue<pair<int, int>, vector<pair<int, int>>, greater<>> pq;
pq.push({0, src});
dist[src] = 0;
while (!pq.empty()) {
int u = pq.top().second;
pq.pop();
for (auto& [weight, v] : graph[u]) {
if (dist[u] + weight < dist[v]) {
dist[v] = dist[u] + weight;
pq.push({dist[v], v});
}
}
}
cout << "Vertex Distance from Source " << src << "\n";
for (int i = 0; i < V; i++) cout << i << ": " << dist[i] << "\n";
}
int main() {
int V = 5;
vector<vector<pair<int, int>>> graph(V); // adjacency list: graph[u] holds {weight, neighbor} pairs
graph[0] = {{2, 1}, {4, 2}}; // 0 -> 1 (weight 2), 0 -> 2 (weight 4)
graph[1] = {{7, 3}}; // 1 -> 3 (weight 7)
graph[2] = {{3, 3}}; // 2 -> 3 (weight 3)
graph[3] = {{1, 4}}; // 3 -> 4 (weight 1)
graph[4] = {}; // 4 has no outgoing edges
dijkstra(graph, 0);
return 0;
}
Time Complexity Analysis:
• Complexity: O((V + E) log V), where V is the number of vertices and E is the
number of edges.
OUTPUT
Dijkstra's Algorithm
______________
Input: Graph: 0 -> (1, 2), (2, 4), 1 -> (3, 7), 2 -> (3, 3), 3 -> (4, 1), 4 -> None
Output:
Vertex Distance from Source 0
0: 0
1: 2
2: 4
3: 7
4: 8
6. Bellman-Ford Algorithm
Explanation:
The Bellman-Ford algorithm finds the shortest paths from a source vertex to all other vertices in a graph that may contain negative edge weights, by relaxing every edge V - 1 times; with one extra pass it can also detect negative-weight cycles (a sketch of that check follows the complexity analysis).
Code:
#include <iostream>
#include <vector>
#include <climits>
using namespace std;
struct Edge {
int src, dest, weight;
};
void bellmanFord(vector<Edge>& edges, int V, int src) {
vector<int> dist(V, INT_MAX);
dist[src] = 0;
for (int i = 1; i < V; i++) {
for (Edge& edge : edges) {
if (dist[edge.src] != INT_MAX && dist[edge.src] + edge.weight < dist[edge.dest])
dist[edge.dest] = dist[edge.src] + edge.weight;
}
}
cout << "Vertex Distance from Source " << src << "\n";
for (int i = 0; i < V; i++) cout << i << ": " << dist[i] << "\n";
}
int main() {
int V = 5;
vector<Edge> edges = {{0, 1, -1}, {0, 2, 4}, {1, 2, 3}, {1, 3, 2}, {1, 4, 2}, {3, 2, 5}, {3, 1, 1}, {4, 3, -3}};
bellmanFord(edges, V, 0);
return 0;
}
Time Complexity Analysis:
• Complexity: O(V * E), where V is the number of vertices and E is the number of
edges.
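The code above assumes no negative-weight cycle is reachable from the source. A minimal sketch of the usual extra check (an illustrative helper, not part of the program above; it reuses the Edge struct and the dist vector computed in bellmanFord):
// Returns true if some edge can still be relaxed after the V - 1 passes,
// i.e. a negative-weight cycle is reachable from the source.
bool hasNegativeCycle(const vector<Edge>& edges, const vector<int>& dist) {
    for (const Edge& edge : edges) {
        if (dist[edge.src] != INT_MAX && dist[edge.src] + edge.weight < dist[edge.dest])
            return true;
    }
    return false;
}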
OUTPUT
Bellman-Ford Algorithm
______________
Input: Graph: Vertices: 5, Edges: [(0,1,-1), (0,2,4), (1,2,3), (1,3,2), (1,4,2), (3,2,5), (3,1,1), (4,3,-3)]
Output:
Vertex Distance from Source 0
0: 0
1: -1
2: 2
3: -2
4: 1
7. N Queen’s Problem (Backtracking)
Explanation:
The N Queens problem asks to place N queens on an N×N chessboard so that no two queens attack each other. Backtracking places one queen per column, trying each row in turn and backing up whenever no safe row exists.
Code:
#include <iostream>
#include <cstdlib>
using namespace std;
// board[i] holds the row index of the queen already placed in column i.
bool isSafe(int board[], int row, int col) {
for (int i = 0; i < col; i++)
if (board[i] == row || abs(board[i] - row) == col - i) return false; // same row or same diagonal
return true;
}
void solveNQueens(int board[], int col, int N) {
if (col == N) {
for (int i = 0; i < N; i++) cout << board[i] << " ";
cout << endl;
return;
}
for (int i = 0; i < N; i++) {
if (isSafe(board, i, col)) {
board[col] = i;
solveNQueens(board, col + 1, N);
}
}
}
int main() {
const int N = 4;
int board[N];
solveNQueens(board, 0, N);
return 0;
}
Time Complexity Analysis:
• Complexity: O(N!), where N is the number of queens.
OUTPUT
N Queen’s Problem
______________
Input: N: 4
Output:
[1, 3, 0, 2]
[2, 0, 3, 1]
(each entry is the 0-based row of the queen placed in that column)
8. Matrix Multiplication
Explanation:
Matrix multiplication computes each entry C[i][j] of the product C = A × B as the sum over k of A[i][k] * B[k][j], i.e. the dot product of row i of A with column j of B.
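For the 2x2 matrices used below, the top-left entry is C[0][0] = 1*5 + 2*7 = 19: row 0 of A dotted with column 0 of B.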
Code:
#include <iostream>
using namespace std;
void matrixMultiplication(int A[2][2], int B[2][2]) {
int C[2][2] = {0};
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) {
C[i][j] = 0;
for (int k = 0; k < 2; k++) {
C[i][j] += A[i][k] * B[k][j];
}
}
}
cout << "Resultant Matrix: \n";
for (int i = 0; i < 2; i++) {
for (int j = 0; j < 2; j++) cout << C[i][j] << " ";
cout << endl;
}
}
int main() {
int A[2][2] = {{1, 2}, {3, 4}};
int B[2][2] = {{5, 6}, {7, 8}};
matrixMultiplication(A, B);
return 0;
}
Time Complexity Analysis:
• Complexity: O(n^3) for standard matrix multiplication.
OUTPUT
Matrix Multiplication
______________
Input: Matrix A: [1, 2], [3, 4], Matrix B: [5, 6], [7, 8]
Output:
Resultant Matrix:
[19, 22]
[43, 50]
9. Longest Common Subsequence (LCS)
Explanation:
The LCS algorithm finds the longest subsequence common to two sequences using dynamic
programming.
Code:
#include <iostream>
#include <string>
#include <vector>
#include <algorithm>
using namespace std;
int lcs(string X, string Y) {
int m = X.length(), n = Y.length();
vector<vector<int>> L(m + 1, vector<int>(n + 1)); // DP table: L[i][j] = LCS length of X[0..i) and Y[0..j)
for (int i = 0; i <= m; i++) {
for (int j = 0; j <= n; j++) {
if (i == 0 || j == 0) L[i][j] = 0;
else if (X[i-1] == Y[j-1]) L[i][j] = L[i-1][j-1] + 1;
else L[i][j] = max(L[i-1][j], L[i][j-1]);
}
}
return L[m][n];
}
int main() {
string X = "AGGTAB";
string Y = "GXTXAYB";
cout << "Length of LCS is " << lcs(X, Y) << endl;
return 0;
}
Time Complexity Analysis:
• Complexity: O(m * n), where m and n are the lengths of the sequences.
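The same table can also be walked backwards to recover one longest common subsequence itself, not just its length. A minimal sketch (the name reconstructLCS is illustrative; it assumes the same headers as the example above):
string reconstructLCS(const string& X, const string& Y) {
    int m = X.length(), n = Y.length();
    vector<vector<int>> L(m + 1, vector<int>(n + 1, 0));
    for (int i = 1; i <= m; i++)
        for (int j = 1; j <= n; j++)
            L[i][j] = (X[i-1] == Y[j-1]) ? L[i-1][j-1] + 1 : max(L[i-1][j], L[i][j-1]);
    string result;
    int i = m, j = n;
    while (i > 0 && j > 0) {
        if (X[i-1] == Y[j-1]) { result = X[i-1] + result; i--; j--; } // this character is part of the LCS
        else if (L[i-1][j] >= L[i][j-1]) i--; // move toward the larger subproblem
        else j--;
    }
    return result;
}
For the strings in the example it returns "GTAB".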
OUTPUT
Longest Common Subsequence
______________
Input: String 1: "AGGTAB", String 2: "GXTXAYB"
Output: Length of LCS is 4
10. String Matching Algorithms
a) Naïve String Matching Algorithm
Explanation:
The Naïve String Matching algorithm checks for a match by comparing each substring of the
text with the pattern.
Code:
#include <iostream>
#include <string>
using namespace std;
void naiveSearch(const string& text, const string& pattern) {
int n = text.length();
int m = pattern.length();
for (int i = 0; i <= n - m; i++) {
int j;
for (j = 0; j < m; j++) {
if (text[i + j] != pattern[j]) break;
}
if (j == m) {
cout << "Pattern found at index " << i << endl;
}
}
}
int main() {
string text = "AABAACAADAABAABA";
string pattern = "AABA";
naiveSearch(text, pattern);
return 0;
}
Time Complexity Analysis:
• Complexity: O((n - m + 1) * m), where n is the length of the text and m is the length
of the pattern.
OUTPUT
Naïve String Matching
______________
Input: Text: "AABAACAADAABAABA", Pattern: "AABA"
Output:
Pattern found at index 0
Pattern found at index 9
Pattern found at index 12
b) Rabin-Karp Algorithm
Explanation:
The Rabin-Karp algorithm uses a hash function to compute the hash of the pattern and
compare it with hashes of substrings in the text, reducing unnecessary comparisons.
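Concretely, with d = 256 (the alphabet size) and a prime modulus q, the hash of the window starting at i+1 is obtained from the hash of the window starting at i in O(1) time:
t(i+1) = (d * (t(i) - text[i] * h) + text[i + m]) mod q, where h = d^(m-1) mod q.
This is the rolling-hash update performed in the loop below; only windows whose hash equals the pattern's hash are compared character by character.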
Code:
#include <iostream>
#include <string>
using namespace std;
#define d 256
const int q = 101; // A prime number
void rabinKarp(const string& text, const string& pattern) {
int n = text.length();
int m = pattern.length();
int p = 0; // Hash value for pattern
int t = 0; // Hash value for text
int h = 1;
for (int i = 0; i < m - 1; i++) h = (h * d) % q;
for (int i = 0; i < m; i++) {
p = (d * p + pattern[i]) % q;
t = (d * t + text[i]) % q;
}
for (int i = 0; i <= n - m; i++) {
if (p == t) {
int j;
for (j = 0; j < m; j++) {
if (text[i + j] != pattern[j]) break;
}
if (j == m) cout << "Pattern found at index " << i << endl;
}
if (i < n - m) {
t = (d * (t - text[i] * h) + text[i + m]) % q;
if (t < 0) t += q;
}
}
}
int main() {
string text = "GEEKS FOR GEEKS";
string pattern = "GEEK";
rabinKarp(text, pattern);
return 0;
}
Time Complexity Analysis:
• Average Case Complexity: O(n + m)
• Worst Case Complexity: O((n - m + 1) * m)
OUTPUT
Rabin-Karp Algorithm
______________
Input: Text: "BEERS FOR BEERS", Pattern: "BEER"
Output:
Pattern found at index 0
Pattern found at index 10
c) Knuth-Morris-Pratt (KMP) Algorithm
Explanation:
The KMP algorithm preprocesses the pattern to create a "longest prefix suffix" (LPS) array,
which helps skip unnecessary comparisons in the search process.
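For example, for the pattern "ABABCABAB" used below, the LPS array is [0, 0, 1, 2, 0, 1, 2, 3, 4]; the final entry 4 means the longest proper prefix of the pattern that is also a suffix has length 4 ("ABAB"), so after a mismatch the search can resume from that position instead of restarting at the beginning of the pattern.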
Code:
#include <iostream>
#include <string>
#include <vector>
using namespace std;
void computeLPSArray(const string& pattern, vector<int>& lps) {
int length = 0;
int i = 1;
lps[0] = 0;
while (i < pattern.length()) {
if (pattern[i] == pattern[length]) {
length++;
lps[i] = length;
i++;
} else {
if (length != 0) {
length = lps[length - 1];
} else {
lps[i] = 0;
i++;
}
}
}
}
void KMPSearch(const string& text, const string& pattern) {
int n = text.length();
int m = pattern.length();
vector<int> lps(m);
computeLPSArray(pattern, lps);
int i = 0; // Index for text
int j = 0; // Index for pattern
while (i < n) {
if (pattern[j] == text[i]) {
j++;
i++;
}
if (j == m) {
cout << "Pattern found at index " << i - j << endl;
j = lps[j - 1];
} else if (i < n && pattern[j] != text[i]) {
if (j != 0) j = lps[j - 1];
else i++;
}
}
}
int main() {
string text = "ABABDABACDABABCABAB";
string pattern = "ABABCABAB";
KMPSearch(text, pattern);
return 0;
}
Time Complexity Analysis:
• Preprocessing Time Complexity: O(m), where m is the length of the pattern.
• Search Time Complexity: O(n), where n is the length of the text.
OUTPUT
Knuth-Morris-Pratt (KMP) Algorithm
______________
Input: Text: "ABABDABACDABABCABAB", Pattern: "ABABCABAB"
Output:
Pattern found at index 10