Faculty of Computing
SE-314: Software Construction
Class: BESE 13AB
Concurrency
Date: 9th Dec 2024
Assignment No 3
Instructor: Dr. Mehvish Rashid
Name CMS ID
Asna Maqsood 426990
Muhammad Owais Khan 404262
Umar Farooq 406481
Zainab Athar 405094
Contents
Concurrency Attainment in Software Systems
Web Servers
Database Systems
Batch Processing Systems
Microservices Architecture
Operating Systems
Distributed Systems
Video/Graphics Rendering Systems
Simulation Software
Real-Time Communication Systems
Containers and Virtualization Systems
from flask import Flask, request
import threading
import time

app = Flask(__name__)

def handle_request(client_id):
    # Simulate a slow operation so the background thread stays visibly busy
    print(f"Processing request from Client {client_id}")
    time.sleep(2)
    print(f"Completed request from Client {client_id}")

@app.route('/process', methods=['GET'])
def process_request():
    client_id = request.args.get('client_id', 'unknown')
    # Hand the slow work to a background thread and respond immediately
    thread = threading.Thread(target=handle_request, args=(client_id,))
    thread.start()
    return f"Request from Client {client_id} is being processed!"

if __name__ == '__main__':
    app.run(threaded=True)  # Enable multithreaded request handling
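To see the threading in action, a small client can issue several requests at once. The following is a minimal sketch, assuming the server above is running on Flask's default port 5000 (the client_id values are arbitrary):

from concurrent.futures import ThreadPoolExecutor
from urllib.request import urlopen

def call_server(client_id):
    # Hypothetical local URL; adjust host/port if the server runs elsewhere
    with urlopen(f"http://localhost:5000/process?client_id={client_id}") as resp:
        return resp.read().decode()

with ThreadPoolExecutor(max_workers=3) as pool:
    for reply in pool.map(call_server, [1, 2, 3]):
        print(reply)

All three replies should arrive almost immediately, while the 2-second processing continues in the server's background threads.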
const http = require('http');

const server = http.createServer((req, res) => {
  if (req.url === '/process') {
    console.log('Processing request...');
    setTimeout(() => {
      res.writeHead(200, { 'Content-Type': 'text/plain' });
      res.end('Request processed successfully!');
    }, 2000); // Simulate an asynchronous operation
  } else {
    res.writeHead(404, { 'Content-Type': 'text/plain' });
    res.end('Not Found');
  }
});

server.listen(3000, () => {
  console.log('Server is listening on port 3000');
});
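Because the 2-second delay is scheduled on the event loop rather than blocking it, Node.js can accept and serve other requests while each simulated operation is pending; concurrency here comes from asynchronous callbacks on a single thread rather than from multiple threads.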
-- Transaction 1
START TRANSACTION;
SELECT balance FROM accounts WHERE account_id = 1 FOR UPDATE; -- Acquires an exclusive row lock
UPDATE accounts SET balance = balance - 500 WHERE account_id = 1;
COMMIT;

-- Transaction 2
START TRANSACTION;
SELECT balance FROM accounts WHERE account_id = 1 FOR UPDATE; -- Waits until Transaction 1 commits
UPDATE accounts SET balance = balance + 500 WHERE account_id = 2;
COMMIT;
-- Transaction 1: sets its isolation level
SET TRANSACTION ISOLATION LEVEL REPEATABLE READ;
START TRANSACTION;
SELECT balance FROM accounts WHERE account_id = 1;

-- Transaction 2: tries to update the same record
START TRANSACTION;
UPDATE accounts SET balance = balance + 100 WHERE account_id = 1;
-- In lock-based engines this UPDATE blocks until Transaction 1 ends;
-- MVCC engines (e.g., InnoDB) instead serve Transaction 1 from a snapshot.

-- Commit or roll back Transaction 1 to release its locks
COMMIT;
-- Transaction for User A
START TRANSACTION;
SELECT balance FROM accounts WHERE account_id = 1 FOR UPDATE; -- Locks the row
-- Withdraw only if the locked balance is sufficient
-- (the balance check is folded into the WHERE clause so this runs as plain SQL)
UPDATE accounts SET balance = balance - 500
WHERE account_id = 1 AND balance >= 500;
COMMIT;

-- Transaction for User B
START TRANSACTION;
SELECT balance FROM accounts WHERE account_id = 1 FOR UPDATE; -- Waits until User A's transaction completes
UPDATE accounts SET balance = balance - 300
WHERE account_id = 1 AND balance >= 300;
COMMIT;
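Because both withdrawals lock the same row with FOR UPDATE, they are forced to run one after the other: User B re-reads the balance only after User A's debit has committed, which prevents the classic lost-update anomaly.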
from multiprocessing import Pool

def compute_payroll(employee_id):
    print(f"Processing payroll for employee {employee_id}")
    # Simulate computation
    return f"Payroll computed for employee {employee_id}"

if __name__ == "__main__":
    employees = [101, 102, 103, 104, 105]
    # Create a pool of workers
    with Pool(processes=4) as pool:
        results = pool.map(compute_payroll, employees)
    for result in results:
        print(result)
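Note that while the worker processes may print their progress in any order, Pool.map returns the results in the same order as the input list, so the payroll summary stays aligned with the employee IDs.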
from pyspark.sql import SparkSession

# Initialize a Spark session
spark = SparkSession.builder.appName("PayrollProcessing").getOrCreate()

# Load employee data
data = [("John", 1000), ("Alice", 1200), ("Bob", 900), ("Jane", 1100)]
columns = ["Name", "Salary"]
employee_df = spark.createDataFrame(data, columns)

# Compute a 10% bonus; the column expression is applied in parallel across partitions
employee_df = employee_df.withColumn("Bonus", employee_df["Salary"] * 0.1)

# Show results
employee_df.show()

# Stop the Spark session
spark.stop()
+-----+------+-----+
| Name|Salary|Bonus|
+-----+------+-----+
| John|  1000|100.0|
|Alice|  1200|120.0|
|  Bob|   900| 90.0|
| Jane|  1100|110.0|
+-----+------+-----+
"public static class TokenizerMapper extends Mapper<Object, Text, Text, IntWritable> {" (“MapReduce-
Demo/src/main/java/mapReduceTest/wordCount/WordCount ... - GitHub”)
private final static IntWritable one = new IntWritable(1);
private Text word = new Text();
public void map(Object key, Text value, Context context) throws IOException, InterruptedException {
StringTokenizer itr = new StringTokenizer(value.toString());
while (itr.hasMoreTokens()) {
word.set(itr.nextToken());
context.write(word, one);
}
}
}
"public static class IntSumReducer extends Reducer<Text, IntWritable, Text, IntWritable> {" (“Testing
your Hadoop program with Maven on IntelliJ - Medium”)
private IntWritable result = new IntWritable();
public void reduce(Text key, Iterable<IntWritable> values, Context context) throws IOException,
InterruptedException { (“WordCount.java - GitHub”)
int sum = 0;
for (IntWritable val : values) {
sum += val.get();
}
result.set(sum);
context.write(key, result);
}
}
version: '3.8'
services:
  order-service:
    image: order-service:latest
    ports:
      - "8081:8081"
    depends_on:
      - db
  payment-service:
    image: payment-service:latest
    ports:
      - "8082:8082"
    depends_on:
      - db
  notification-service:
    image: notification-service:latest
    ports:
      - "8083:8083"
  db:
    image: postgres:latest
    environment:
      POSTGRES_USER: admin
      POSTGRES_PASSWORD: secret
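Saved as docker-compose.yml, this file lets docker compose up start all four containers concurrently. Note that a plain depends_on only controls start-up order, not readiness, so each service still needs its own retry logic while the database finishes initializing.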
import pika

def send_message():
    connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    channel = connection.channel()
    # Declare the queue
    channel.queue_declare(queue='order_queue')
    # Publish a message
    message = "New order received"
    channel.basic_publish(exchange='', routing_key='order_queue', body=message)
    print(f"Sent: {message}")
    connection.close()

if __name__ == "__main__":
    send_message()
import pika

def receive_message():
    connection = pika.BlockingConnection(pika.ConnectionParameters('localhost'))
    channel = connection.channel()
    # Declare the queue (idempotent; safe even if the producer declared it first)
    channel.queue_declare(queue='order_queue')

    def callback(ch, method, properties, body):
        print(f"Received: {body.decode()}")

    # Consume messages
    channel.basic_consume(queue='order_queue', on_message_callback=callback, auto_ack=True)
    print('Waiting for messages...')
    channel.start_consuming()

if __name__ == "__main__":
    receive_message()
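Running the consumer in one terminal and the producer in another shows the decoupling: the producer returns as soon as the broker holds the message, and if several consumers share order_queue, RabbitMQ distributes messages among them round-robin, which is a simple way to scale a service horizontally.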
import org.apache.kafka.clients.producer.*;
import java.util.Properties;

public class OrderProducer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        Producer<String, String> producer = new KafkaProducer<>(props);
        String topic = "order-events";
        String key = "order1";
        String value = "Order placed successfully";

        // Send asynchronously; the callback runs once the broker acknowledges
        producer.send(new ProducerRecord<>(topic, key, value), (metadata, exception) -> {
            if (exception == null) {
                System.out.println("Event sent: " + value);
            } else {
                exception.printStackTrace();
            }
        });
        producer.close(); // Flushes pending records before shutting down
    }
}
import org.apache.kafka.clients.consumer.*;
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

public class OrderConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("group.id", "order-group");
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        Consumer<String, String> consumer = new KafkaConsumer<>(props);
        consumer.subscribe(Collections.singletonList("order-events"));

        // Poll in a loop; each call returns whatever records arrived since the last poll
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("Received event: %s%n", record.value());
            }
        }
    }
}
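Because the consumer joins group.id order-group, starting several copies of OrderConsumer makes Kafka split the topic's partitions among them, so events are processed concurrently without any two consumers in the group receiving the same record.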
import time

# Simulated processes with execution times
processes = [("Process A", 5), ("Process B", 3), ("Process C", 4)]
time_quantum = 2  # Time slice per process

def round_robin(processes, time_quantum):
    while processes:
        process, time_left = processes.pop(0)
        print(f"Running {process} for {min(time_left, time_quantum)} seconds...")
        time.sleep(min(time_left, time_quantum))  # Simulate process execution time
        remaining_time = time_left - time_quantum
        if remaining_time > 0:
            processes.append((process, remaining_time))  # Re-queue if more time is needed
        else:
            print(f"{process} has finished execution.")

# Simulate round-robin scheduling
round_robin(processes, time_quantum)
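The schedule this produces is deterministic: each process gets at most 2 seconds per turn and is re-queued until its time is used up, giving the following trace:

Running Process A for 2 seconds...
Running Process B for 2 seconds...
Running Process C for 2 seconds...
Running Process A for 2 seconds...
Running Process B for 1 seconds...
Process B has finished execution.
Running Process C for 2 seconds...
Process C has finished execution.
Running Process A for 1 seconds...
Process A has finished execution.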
#include <stdio.h>
#include <signal.h>
#include <unistd.h>

// Simulate an interrupt service routine
void handle_interrupt(int signal) {
    printf("Interrupt received! Handling the interrupt...\n");
}

int main() {
    // Set up the interrupt handler for SIGINT (Ctrl+C)
    signal(SIGINT, handle_interrupt);
    printf("Program running. Press Ctrl+C to send an interrupt.\n");
    // Simulate ongoing tasks in the system
    while (1) {
        printf("Performing task...\n");
        sleep(1); // Simulate task processing
    }
    return 0;
}
from pyspark import SparkContext
# Initialize a Spark context
sc = SparkContext("local", "Distributed Computation Example")
# Create a distributed dataset (Resilient Distributed Dataset - RDD)
data = sc.parallelize([1, 2, 3, 4, 5])
# Perform a parallel computation (e.g., sum of squares)
squared_sum = data.map(lambda x: x ** 2).reduce(lambda a, b: a + b)
print(f"Sum of squares: {squared_sum}")
# Stop the Spark context
sc.stop()
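With the data 1 through 5, the map and reduce steps run across the RDD's partitions and the driver prints Sum of squares: 55.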
import random
import time
from threading import Thread

class RaftNode:
    def __init__(self, id):
        self.id = id
        self.state = "follower"
        self.votes = 0

    def start(self):
        print(f"Node {self.id} started as {self.state}.")
        while self.state != "leader":
            if self.state == "follower":
                # Simulate a randomized election timeout
                time.sleep(random.uniform(1, 3))
                self.state = "candidate"
                self.votes = 1  # A candidate votes for itself
                print(f"Node {self.id} became a candidate and started an election.")
                self.elect_leader()

    def elect_leader(self):
        # Simulate the other two nodes deciding whether to grant their vote
        # (toy model: nodes do not actually exchange messages)
        for _ in range(2):
            if random.choice([True, False]):
                print(f"Node {self.id} received a vote.")
                self.votes += 1
        if self.votes >= 2:  # Majority of a 3-node cluster
            self.state = "leader"
            print(f"Node {self.id} became the leader.")
        else:
            self.state = "follower"  # Lost the election; wait for the next timeout

# Create and start multiple Raft nodes
nodes = [RaftNode(id) for id in range(3)]
threads = [Thread(target=node.start) for node in nodes]
for thread in threads:
    thread.start()
import time
import multiprocessing

# Simulate rendering a frame (a placeholder for actual computation)
def render_frame(frame_number):
    print(f"Rendering frame {frame_number}...")
    time.sleep(0.5)  # Simulating rendering time
    print(f"Frame {frame_number} rendered.")

def render_video(total_frames):
    # Create a pool of processes to render the frames concurrently
    with multiprocessing.Pool(processes=4) as pool:
        pool.map(render_frame, range(total_frames))

if __name__ == "__main__":
    # Simulate rendering a video with 10 frames
    render_video(10)
#include <iostream>
#include <cuda_runtime.h>

// A simple kernel to simulate rendering a pixel
__global__ void renderPixel(int *image, int width, int height) {
    int x = blockIdx.x * blockDim.x + threadIdx.x;
    int y = blockIdx.y * blockDim.y + threadIdx.y;
    if (x < width && y < height) {
        int index = y * width + x;
        image[index] = x + y; // Placeholder for actual pixel color computation
    }
}

int main() {
    int width = 1920;
    int height = 1080;
    int image_size = width * height * sizeof(int);
    int *d_image;

    // Allocate memory on the GPU
    cudaMalloc((void **)&d_image, image_size);

    // Define grid and block size
    dim3 threadsPerBlock(16, 16);
    dim3 numBlocks((width + 15) / 16, (height + 15) / 16);

    // Launch kernel to render the image
    renderPixel<<<numBlocks, threadsPerBlock>>>(d_image, width, height);

    // Copy the result back to host memory (implicitly waits for the kernel)
    int *h_image = new int[width * height];
    cudaMemcpy(h_image, d_image, image_size, cudaMemcpyDeviceToHost);

    // Clean up
    cudaFree(d_image);
    delete[] h_image;

    std::cout << "Rendering complete!" << std::endl;
    return 0;
}
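Compiled with NVIDIA's nvcc compiler (for instance, nvcc render.cu -o render, where render.cu is whatever the source file is named), the launch maps each 16x16 thread block to a tile of the image, so all 1920x1080 pixels are computed by thousands of GPU threads running concurrently.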
import time
import multiprocessing

# Simulate vehicle movement (a placeholder for actual movement logic)
def simulate_vehicle(vehicle_id):
    print(f"Vehicle {vehicle_id} moving...")
    time.sleep(0.2)  # Simulating time taken for the vehicle to move
    print(f"Vehicle {vehicle_id} reached destination.")

def simulate_traffic(total_vehicles):
    # Create a pool of processes to simulate vehicle movements concurrently
    with multiprocessing.Pool(processes=4) as pool:
        pool.map(simulate_vehicle, range(total_vehicles))

if __name__ == "__main__":
    # Simulate a traffic flow of 10 vehicles
    simulate_traffic(10)
import threading
import time

# Simulate an agent (vehicle) moving through traffic
def vehicle_agent(vehicle_id):
    print(f"Vehicle {vehicle_id} entering traffic...")
    time.sleep(0.3)  # Simulate time taken for the vehicle to make a move
    print(f"Vehicle {vehicle_id} exited traffic.")

def simulate_traffic_agents(total_agents):
    threads = []
    for i in range(total_agents):
        # Create a new thread for each agent
        thread = threading.Thread(target=vehicle_agent, args=(i,))
        threads.append(thread)
        thread.start()
    # Wait for all threads to complete
    for thread in threads:
        thread.join()

# Simulate 5 vehicle agents
simulate_traffic_agents(5)
import threading
import time

# Function to simulate processing the video stream
def process_video_stream():
    for i in range(5):
        print("Processing video frame...")
        time.sleep(0.5)  # Simulating frame processing time

# Function to simulate processing the audio stream
def process_audio_stream():
    for i in range(5):
        print("Processing audio data...")
        time.sleep(0.3)  # Simulating audio data processing time

# Function to simulate handling chat messages
def process_chat_messages():
    for i in range(5):
        print("Processing chat message...")
        time.sleep(0.4)  # Simulating message processing time

# Main function to simulate a video call
def start_video_call():
    # Creating threads for video, audio, and chat processing
    video_thread = threading.Thread(target=process_video_stream)
    audio_thread = threading.Thread(target=process_audio_stream)
    chat_thread = threading.Thread(target=process_chat_messages)
    # Starting the threads
    video_thread.start()
    audio_thread.start()
    chat_thread.start()
    # Waiting for all threads to complete
    video_thread.join()
    audio_thread.join()
    chat_thread.join()

# Start the video call simulation
start_video_call()
import asyncio

# Coroutine to simulate sending the video stream
async def send_video_stream():
    for i in range(5):
        print("Sending video frame...")
        await asyncio.sleep(0.5)  # Simulating network delay

# Coroutine to simulate receiving the audio stream
async def receive_audio_stream():
    for i in range(5):
        print("Receiving audio data...")
        await asyncio.sleep(0.3)  # Simulating network delay

# Coroutine to simulate real-time communication
async def start_real_time_communication():
    # Run the video and audio coroutines concurrently
    video_task = asyncio.create_task(send_video_stream())
    audio_task = asyncio.create_task(receive_audio_stream())
    # Wait for both tasks to finish
    await asyncio.gather(video_task, audio_task)

# Start the real-time communication simulation
asyncio.run(start_real_time_communication())
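Unlike the threaded version above, both streams here are interleaved by the event loop on a single thread: each await hands control back so the other coroutine can progress, which is cooperative concurrency rather than preemptive multithreading.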
# Build a Docker image for an application
docker build -t myapp /path/to/app  # Path to the build context (the directory containing the Dockerfile)
# Run the first container
docker run -d --name container1 myapp
# Run the second container concurrently
docker run -d --name container2 myapp
# List all running containers
docker ps
# Create a new VM
VBoxManage createvm --name "VM1" --register
# Configure the VM (e.g., allocate 2 GB RAM)
VBoxManage modifyvm "VM1" --memory 2048
# Start the first VM
VBoxManage startvm "VM1" --type headless
# Create another VM and start it concurrently
VBoxManage createvm --name "VM2" --register
VBoxManage modifyvm "VM2" --memory 2048
VBoxManage startvm "VM2" --type headless