Ex. No : 1
Hello World Using Fork-Join Parallelism
Date :
AIM:
To write a C program for Hello World to demonstrate fork-join parallelism.
ALGORITHM:
1. Start
2. Include header files for OpenMP
3. Specify the parallel region using #pragma omp parallel
4. Write code for printing "Hello World"
5. Specify the number of threads using the OMP_NUM_THREADS environment variable
6. Compile using gcc -o filename -fopenmp filename.c
7. Run the program using ./filename
8. Stop
PROGRAM:
#include <omp.h>
#include <stdio.h>

int main() {
    /* Fork: the master thread creates a team of threads for this region */
    #pragma omp parallel
    {
        printf("Hello World from thread %d\n", omp_get_thread_num());
    }
    /* Join: all threads synchronize at the end of the region and only the master continues */
    return 0;
}
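The thread count can also be requested from inside the program instead of through the OMP_NUM_THREADS environment variable. A minimal sketch (the count 4 is only an example value, not part of the prescribed program):

#include <omp.h>
#include <stdio.h>

int main() {
    omp_set_num_threads(4);    /* request 4 threads (example value) */
    #pragma omp parallel
    {
        printf("Hello World from thread %d of %d\n",
               omp_get_thread_num(), omp_get_num_threads());
    }
    return 0;
}

Either variant is compiled and run as in steps 6 and 7 of the algorithm: gcc -o filename -fopenmp filename.c, then ./filename.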
OUTPUT:
RESULT:
Thus, the C program for demonstrating OpenMP Fork-Join parallelism for Hello World
was written, executed, and the output was verified successfully.
Ex. No : 2
Matrix-Vector Multiplication Using OpenMP
Date :
AIM:
To write a C program to compute matrix-vector multiplication with OpenMP directives.
ALGORITHM:
1. Start
2. Include header files for OpenMP
3. Get matrix and vector from user
4. Write multiplication code A*x in the parallel region
5. Print the result
6. Stop
PROGRAM:
#include <stdio.h>
#include <omp.h>

int main() {
    int r, c, i, j;
    float a[10][10], b[10], c_result[10];

    printf("Enter the number of rows and columns: ");
    scanf("%d%d", &r, &c);

    printf("Enter the matrix elements:\n");
    for (i = 0; i < r; i++)
        for (j = 0; j < c; j++)
            scanf("%f", &a[i][j]);

    printf("Enter the vector elements:\n");
    for (i = 0; i < c; i++)
        scanf("%f", &b[i]);

    /* Each row of the result is computed independently; j must be private
       so that the threads do not share the inner loop counter. */
    #pragma omp parallel for private(j)
    for (i = 0; i < r; i++) {
        c_result[i] = 0;
        for (j = 0; j < c; j++)
            c_result[i] += a[i][j] * b[j];
    }

    printf("Resulting vector:\n");
    for (i = 0; i < r; i++)
        printf("%f\n", c_result[i]);
    return 0;
}
OUTPUT:
RESULT:
Thus, the C program for computing matrix-vector multiplication with OpenMP
directives was written, executed, and the output was verified successfully.
Ex. No : 3A
Sum of Array Elements Using OpenMP
Date :
AIM:
To write a C program to find the sum of all elements in an array using OpenMP
directives.
ALGORITHM:
1. Start
2. Include header files for OpenMP
3. Specify the parallel region using #pragma omp parallel
4. Get the number of elements and the elements of the array
5. Write the sum logic in parallel region
6. Print the sum of elements
7. Stop
PROGRAM:
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main() {
    float Array[100], sum = 0.0;
    int array_size, i;

    printf("Enter the size of the array: ");
    scanf("%d", &array_size);
    if (array_size <= 0 || array_size > 100) {
        printf("Array size should be between 1 and 100!\n");
        exit(1);
    }
    for (i = 0; i < array_size; i++) {
        printf("Enter element %d: ", i);
        scanf("%f", &Array[i]);
    }

    /* The critical section serializes the update of the shared variable sum,
       so each addition is performed by exactly one thread at a time. */
    #pragma omp parallel for shared(sum)
    for (i = 0; i < array_size; i++) {
        #pragma omp critical
        sum += Array[i];
    }

    printf("The sum of array elements using OpenMP is: %f\n", sum);
    return 0;
}
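The critical section above serializes every addition. A more idiomatic alternative is the reduction clause, which gives each thread a private partial sum and combines them at the end of the loop. A sketch of the changed loop only (the rest of the program stays the same):

    /* Sketch: same summation using a reduction instead of a critical section */
    #pragma omp parallel for reduction(+:sum)
    for (i = 0; i < array_size; i++)
        sum += Array[i];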
OUTPUT:
RESULT:
Thus, the C program for computing the sum of elements in an array using OpenMP
directives was written, executed, and the output was verified successfully.
Ex. No : 3B
Finding the Largest Element Using OpenMP
Date :
AIM:
To write a C program to find the largest element in an array using OpenMP directives.
ALGORITHM:
1. Start
2. Include header files for OpenMP
3. Get array elements from the user
4. If only one element, print it as largest
5. Else, set first element as max
6. Compare and update max in parallel region
7. Print the maximum element
8. Stop
PROGRAM:
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

int main() {
    int array[100], Noofelements, i, max;

    printf("Enter the number of elements: ");
    scanf("%d", &Noofelements);
    if (Noofelements <= 0 || Noofelements > 100) {
        printf("The number of elements must be between 1 and 100!\n");
        exit(0);
    }
    for (i = 0; i < Noofelements; i++) {
        printf("Enter element %d: ", i);
        scanf("%d", &array[i]);
    }

    max = array[0];
    /* The critical section makes the compare-and-update of the shared
       variable max atomic with respect to the other threads. */
    #pragma omp parallel for
    for (i = 1; i < Noofelements; i++) {
        #pragma omp critical
        if (array[i] > max)
            max = array[i];
    }

    printf("The largest number in the array is: %d\n", max);
    return 0;
}
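OpenMP 3.1 and later also support a max reduction for C, which removes the critical section entirely. A sketch of the changed loop only (the rest of the program stays the same):

    /* Sketch: requires OpenMP 3.1 or newer for reduction(max:...) in C */
    #pragma omp parallel for reduction(max:max)
    for (i = 1; i < Noofelements; i++)
        if (array[i] > max)
            max = array[i];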
OUTPUT:
RESULT:
Thus, the C program for finding the largest element in an array using OpenMP directives
was written, executed, and the output was verified successfully.
Ex. No : 4
Message Passing Using MPI
Date :
AIM:
To write a C program for demonstrating message passing logic using MPI.
ALGORITHM:
1. Start
2. Include the MPI header
3. Initialize MPI using MPI_Init
4. Determine process ID using MPI_Comm_rank
5. Determine number of processes using MPI_Comm_size
6. Print Hello World message with process ID and total processes
7. Finalize MPI
8. Stop
PROGRAM:
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[]) {
    int myrank, size;

    MPI_Init(&argc, &argv);                    // Initialize MPI environment
    MPI_Comm_rank(MPI_COMM_WORLD, &myrank);    // Get the rank of the process
    MPI_Comm_size(MPI_COMM_WORLD, &size);      // Get the total number of processes

    // Print message from each process
    printf("Processor %d of %d: Hello World!\n", myrank, size);

    MPI_Finalize();                            // Finalize the MPI environment
    return 0;
}
IMPORTANT MPI FUNCTIONS AND THEIR PURPOSE:
MPI Function      Purpose
MPI_Init          Initializes the MPI environment.
MPI_Finalize      Terminates the MPI environment and releases resources.
MPI_Comm_rank     Determines the process's ID (rank) within the communicator.
MPI_Comm_size     Determines the number of processes in the communicator.
MPI_Bcast         Broadcasts data from one process to all others in the communicator.
MPI_Barrier       Synchronizes all processes in the communicator.
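MPI_Bcast is demonstrated in Ex. No : 7. MPI_Barrier is not used in any program of this record; a minimal sketch of how it synchronizes processes is given below (the sleep call is only there to make the effect visible and is not part of the prescribed program):

#include <stdio.h>
#include <unistd.h>
#include <mpi.h>

int main(int argc, char *argv[]) {
    int rank;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    sleep(rank);                          /* each process "works" for a different time */
    printf("Process %d reached the barrier\n", rank);
    MPI_Barrier(MPI_COMM_WORLD);          /* no process continues until all have arrived */
    printf("Process %d passed the barrier\n", rank);
    MPI_Finalize();
    return 0;
}

The MPI programs in this record can be compiled with mpicc filename.c -o filename and launched with mpirun -np 4 ./filename (the process count 4 is only an example).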
OUTPUT:
RESULT:
Thus, the C program for demonstrating message passing logic using MPI was written,
executed, and the output was verified successfully.
Ex. No : 5
All-Pairs Shortest Path Using Floyd’s Algorithm
Date :
AIM:
To write a C program to implement all-pairs shortest path problem using Floyd's
algorithm.
ALGORITHM:
1. Start
2. Import header files
3. Define number of nodes in the graph
4. Define minimum function
5. Define and initialize distance matrix
6. Apply Floyd-Warshall logic using nested loops
7. Print the final shortest path matrix
8. Stop
PROGRAM:
#include <stdio.h>
#include <math.h>
#include <stdlib.h>
#include <time.h>

#define INF 9999                       /* large value used to mark "no edge" */
#ifndef min
#define min(a, b) (((a) < (b)) ? (a) : (b))
#endif
#define N 4

int distance_matrix[N][N] = {
    {0,   INF, -2,  INF},
    {4,   0,   3,   INF},
    {INF, INF, 0,   2},
    {INF, -1,  INF, 0}
};

int main(int argc, char *argv[]) {
    int src, dst, middle;

    printf("Adjacency Matrix before Floyd-Warshall Algorithm\n");
    for (src = 0; src < N; src++) {
        for (dst = 0; dst < N; dst++) {
            printf("%d ", distance_matrix[src][dst]);
        }
        printf("\n");
    }

    /* Relax every pair (src, dst) through every intermediate vertex middle.
       Entries equal to INF mean "no edge" and must not take part in the sums. */
    for (middle = 0; middle < N; middle++) {
        for (src = 0; src < N; src++) {
            for (dst = 0; dst < N; dst++) {
                if (distance_matrix[src][middle] != INF &&
                    distance_matrix[middle][dst] != INF &&
                    distance_matrix[src][middle] + distance_matrix[middle][dst] <
                        distance_matrix[src][dst]) {
                    distance_matrix[src][dst] = distance_matrix[src][middle] +
                                                distance_matrix[middle][dst];
                }
            }
        }
    }

    printf("Adjacency Matrix after Floyd-Warshall Algorithm\n");
    for (src = 0; src < N; src++) {
        for (dst = 0; dst < N; dst++) {
            printf("%d ", distance_matrix[src][dst]);
        }
        printf("\n");
    }
    return 0;
}
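For a fixed intermediate vertex, the updates of different rows are independent of each other, so the src loop can be parallelized with OpenMP while the middle loop stays sequential (iteration k depends on iteration k-1). A sketch of the modified loop nest, not part of the prescribed program, assuming the file is compiled with -fopenmp:

    for (middle = 0; middle < N; middle++) {
        #pragma omp parallel for private(dst)   /* rows can be relaxed independently */
        for (src = 0; src < N; src++) {
            for (dst = 0; dst < N; dst++) {
                if (distance_matrix[src][middle] != INF &&
                    distance_matrix[middle][dst] != INF &&
                    distance_matrix[src][middle] + distance_matrix[middle][dst] <
                        distance_matrix[src][dst]) {
                    distance_matrix[src][dst] = distance_matrix[src][middle] +
                                                distance_matrix[middle][dst];
                }
            }
        }
    }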
OUTPUT:
RESULT:
Thus, the C program for implementing All-Pairs Shortest Path using Floyd’s algorithm
was written, executed, and the output was verified successfully.
Ex. No : 6
Parallel Random Number Generation Using Monte Carlo Method (OpenMP)
Date :
AIM:
To write a C program for generating random numbers in parallel using Monte Carlo
methods in OpenMP.
ALGORITHM:
1. Start
2. Include required header files
3. Print available processors and threads
4. Define a seed for each thread
5. Generate random numbers in each thread using Monte Carlo method
6. Display the results
7. Stop
PROGRAM:
#include <stdio.h>
#include <stdlib.h>
#include <omp.h>

/* Simple linear congruential generator; each thread owns its own seed.
   Unsigned arithmetic and the 0x7fffffff mask keep the result non-negative. */
double random_value(int *seed) {
    *seed = (int)(((unsigned int)(*seed) * 1103515245u + 12345u) & 0x7fffffffu);
    return (double)(*seed) / 2147483648.0;
}

void monte_carlo(int n, int *seed) {
    double *x = (double *) malloc(n * sizeof(double));

    #pragma omp parallel
    {
        int i, my_id = omp_get_thread_num();
        int my_seed = *seed + my_id;     /* give every thread an independent seed */
        #pragma omp for
        for (i = 0; i < n; i++) {
            x[i] = random_value(&my_seed);
            printf("Thread %d - Random[%d] = %f\n", my_id, i, x[i]);
        }
    }
    free(x);
}

int main() {
    int n = 10;
    int seed = 12345;

    printf("Parallel Random Number Generation using Monte Carlo (OpenMP)\n");
    printf("Number of processors available = %d\n", omp_get_num_procs());
    printf("Maximum number of threads = %d\n", omp_get_max_threads());
    monte_carlo(n, &seed);
    return 0;
}
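The per-thread random streams above are the building block of Monte Carlo estimation. As an illustration only (not part of the prescribed program), the same random_value() generator can be used to estimate pi by counting how many random points fall inside the unit quarter circle; the function below assumes it is added to the same source file:

/* Illustration: Monte Carlo estimate of pi using the random_value() generator above */
double estimate_pi(int samples, int seed) {
    int hits = 0;
    #pragma omp parallel reduction(+:hits)
    {
        int my_seed = seed + omp_get_thread_num();   /* independent stream per thread */
        #pragma omp for
        for (int i = 0; i < samples; i++) {
            double x = random_value(&my_seed);
            double y = random_value(&my_seed);
            if (x * x + y * y <= 1.0)
                hits++;                              /* point falls inside the quarter circle */
        }
    }
    return 4.0 * hits / samples;
}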
OUTPUT:
RESULT:
Thus, the C program for generating random numbers in parallel using Monte Carlo
methods in OpenMP was written, executed, and the output was verified successfully.
Ex. No : 7
MPI Broadcast and Collective Communication
Date :
AIM:
To write a C program to implement MPI Broadcast and Collective Communication.
ALGORITHM:
1. Start
2. Include MPI header files
3. Initialize MPI environment using MPI_Init
4. Determine the rank (Process ID) and number of processes
5. If rank is 0 (root), initialize the data (e.g., data = 100)
6. Use MPI_Bcast() to broadcast the data from root to all other processes
7. Print the received data in each process
8. Finalize MPI using MPI_Finalize()
9. Stop
PROGRAM:
#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv) {
    int rank, size;
    int data;

    MPI_Init(&argc, &argv);                  // Initialize MPI environment
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);    // Get current process rank
    MPI_Comm_size(MPI_COMM_WORLD, &size);    // Get total number of processes

    if (rank == 0) {
        data = 100;                          // Root process initializes data
        printf("Process %d broadcasting data %d\n", rank, data);
    }

    MPI_Bcast(&data, 1, MPI_INT, 0, MPI_COMM_WORLD);   // Broadcast data to all processes
    printf("Process %d received data %d from root process\n", rank, data);

    MPI_Finalize();                          // Finalize MPI
    return 0;
}
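Broadcast is only one of the MPI collectives. Another commonly used collective is MPI_Reduce, which combines a value from every process into a single result on the root. A minimal sketch, not part of the prescribed program:

#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv) {
    int rank, size, sum;
    MPI_Init(&argc, &argv);
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &size);
    /* Every process contributes its rank; the root receives the total. */
    MPI_Reduce(&rank, &sum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
    if (rank == 0)
        printf("Sum of ranks 0..%d = %d\n", size - 1, sum);
    MPI_Finalize();
    return 0;
}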
OUTPUT:
RESULT:
Thus, the C program for MPI Broadcast and Collective Communication was written,
executed, and the output was verified successfully.
Ex. No : 8
MPI Scatter and Gather in C
Date :
AIM:
To write a C program to implement MPI Scatter and Gather operations.
ALGORITHM:
1. Start
2. Include header files and initialize MPI
3. Determine the number of processes and process ID (rank)
4. Root process (rank 0) initializes an array with data
5. Use MPI_Scatter() to distribute individual elements to all processes
6. Each process can modify its received data
7. Use MPI_Gather() to collect the modified data back to the root process
8. Root process prints the gathered result
9. Finalize MPI
10. Stop
PROGRAM:
#include <stdio.h>
#include <mpi.h>

int main(int argc, char **argv) {
    int rank, size;
    int globaldata[4], localdata;    /* one element per process; assumes at most 4 processes */

    MPI_Init(&argc, &argv);                  // Initialize MPI
    MPI_Comm_size(MPI_COMM_WORLD, &size);    // Get number of processes
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);    // Get process rank

    if (rank == 0) {
        for (int i = 0; i < size; i++)
            globaldata[i] = i;               // Initialize data in root
        printf("Processor %d has data: ", rank);
        for (int i = 0; i < size; i++)
            printf("%d ", globaldata[i]);
        printf("\n");
    }

    // Distribute one element of globaldata to every process
    MPI_Scatter(globaldata, 1, MPI_INT, &localdata, 1, MPI_INT, 0, MPI_COMM_WORLD);
    printf("Processor %d received data %d\n", rank, localdata);

    localdata = localdata * 2;               // Modify data (example: double it)

    // Collect the modified values back into globaldata on the root
    MPI_Gather(&localdata, 1, MPI_INT, globaldata, 1, MPI_INT, 0, MPI_COMM_WORLD);

    if (rank == 0) {
        printf("Processor %d gathered data: ", rank);
        for (int i = 0; i < size; i++)
            printf("%d ", globaldata[i]);
        printf("\n");
    }

    MPI_Finalize();                          // Finalize MPI
    return 0;
}
OUTPUT:
RESULT:
Thus, the C program for MPI Scatter and Gather was written, executed, and the output
was verified successfully.
Ex. No : 9
MPI Send and Receive
Date :
AIM:
To write a C program for MPI Send and Receive operations between processes.
ALGORITHM:
1. Start
2. Include the required MPI header file
3. Initialize the MPI environment
4. Determine the rank (ID) of each process
5. If rank is 0 (sender), send data (e.g., 100) to process with rank 1 using MPI_Send()
6. If rank is 1 (receiver), receive data using MPI_Recv()
7. Print the data before and after communication in receiver process
8. Finalize MPI
9. Stop
PROGRAM:
#include <stdio.h>
#include <mpi.h>

int main(int argc, char *argv[]) {
    int rank, data;
    MPI_Status status;

    MPI_Init(&argc, &argv);                  // Initialize MPI environment
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);    // Get process ID

    if (rank == 0) {
        data = 100;
        MPI_Send(&data, 1, MPI_INT, 1, 0, MPI_COMM_WORLD);            // Send data to process 1
        printf("Process 0 sent data %d to Process 1\n", data);
    } else if (rank == 1) {
        data = 0;
        printf("Process 1 - Data before receive: %d\n", data);
        MPI_Recv(&data, 1, MPI_INT, 0, 0, MPI_COMM_WORLD, &status);   // Receive from process 0
        printf("Process 1 - Data after receive: %d\n", data);
    }

    MPI_Finalize();                          // Finalize MPI
    return 0;
}
OUTPUT:
RESULT:
Thus, the C program for MPI Send and Receive was written, executed, and the output
was verified successfully.
Ex. No : 10
Performing Parallel Rank with MPI
Date :
AIM:
To write a C program to perform parallel ranking using MPI where each process
determines the rank of its value among all processes.
ALGORITHM:
1. Start
2. Include required header files and initialize MPI
3. Each process generates a number (float or int)
4. Gather all numbers to the root process using MPI_Gather()
5. Sort the gathered numbers and determine their ranks
6. Scatter the ranks back to the respective processes using MPI_Scatter()
7. Each process prints its number and assigned rank
8. Finalize MPI
9. Stop
PROGRAM:
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>

typedef struct {
    int comm_rank;
    union {
        float f;
        int i;
    } number;
} CommRankNumber;

int compare_float_comm_rank_number(const void *a, const void *b) {
    CommRankNumber *a_num = (CommRankNumber *)a;
    CommRankNumber *b_num = (CommRankNumber *)b;
    if (a_num->number.f < b_num->number.f) return -1;
    else if (a_num->number.f > b_num->number.f) return 1;
    else return 0;
}

int compare_int_comm_rank_number(const void *a, const void *b) {
    CommRankNumber *a_num = (CommRankNumber *)a;
    CommRankNumber *b_num = (CommRankNumber *)b;
    if (a_num->number.i < b_num->number.i) return -1;
    else if (a_num->number.i > b_num->number.i) return 1;
    else return 0;
}

/* Sort the gathered numbers on the root and return an array that maps each
   process's communicator rank to the rank of its value. */
int *get_ranks(void *gathered_numbers, int count, MPI_Datatype datatype) {
    int datatype_size;
    MPI_Type_size(datatype, &datatype_size);

    CommRankNumber *ranked = malloc(count * sizeof(CommRankNumber));
    for (int i = 0; i < count; i++) {
        ranked[i].comm_rank = i;
        memcpy(&ranked[i].number, (char *)gathered_numbers + i * datatype_size,
               datatype_size);
    }

    if (datatype == MPI_FLOAT)
        qsort(ranked, count, sizeof(CommRankNumber), compare_float_comm_rank_number);
    else
        qsort(ranked, count, sizeof(CommRankNumber), compare_int_comm_rank_number);

    int *ranks = malloc(count * sizeof(int));
    for (int i = 0; i < count; i++) {
        ranks[ranked[i].comm_rank] = i;
    }
    free(ranked);
    return ranks;
}

/* Gather one number from every process to the root; only the root allocates
   and receives the full buffer. */
void *gather_numbers_to_root(void *number, MPI_Datatype datatype, MPI_Comm comm) {
    int rank, size, datatype_size;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);
    MPI_Type_size(datatype, &datatype_size);

    void *gathered = NULL;
    if (rank == 0) {
        gathered = malloc(size * datatype_size);
    }
    MPI_Gather(number, 1, datatype, gathered, 1, datatype, 0, comm);
    return gathered;
}

/* Determine the rank of send_data among the values held by all processes and
   place it in recv_data on every process. */
int TMPI_Rank(void *send_data, void *recv_data, MPI_Datatype datatype, MPI_Comm comm) {
    if (datatype != MPI_INT && datatype != MPI_FLOAT) return MPI_ERR_TYPE;

    int rank, size;
    MPI_Comm_rank(comm, &rank);
    MPI_Comm_size(comm, &size);

    void *gathered = gather_numbers_to_root(send_data, datatype, comm);
    int *ranks = NULL;
    if (rank == 0) {
        ranks = get_ranks(gathered, size, datatype);
    }
    MPI_Scatter(ranks, 1, MPI_INT, recv_data, 1, MPI_INT, 0, comm);

    if (rank == 0) {
        free(gathered);
        free(ranks);
    }
    return MPI_SUCCESS;
}

int main(int argc, char **argv) {
    MPI_Init(&argc, &argv);

    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    float value = 0.1 * (rank + 1);    // Example value, different on every process
    int rank_result;
    TMPI_Rank(&value, &rank_result, MPI_FLOAT, MPI_COMM_WORLD);
    printf("Rank for %f on process %d is %d\n", value, rank, rank_result);

    MPI_Finalize();
    return 0;
}
OUTPUT:
RESULT:
Thus, the C program for performing parallel rank using MPI was written, executed, and
the output was verified successfully.