#!/usr/bin/env python
# coding: utf-8
#
#
#
# # Data Generation and Aggregation with Python's Faker Library and PySpark
#
#
#
# Explore the capabilities of the Python Faker library (https://faker.readthedocs.io/) for dynamic data generation!
#
# Whether you're a data scientist, engineer, or analyst, this tutorial guides you through creating realistic, varied datasets with Faker and then harnessing PySpark's distributed computing capabilities to aggregate and analyze the generated data. Along the way you'll see data generation techniques that improve performance and make better use of resources, whether you're producing large datasets or simply streamlining your test data workflow.
#
# **Note:** This is not _synthetic_ data, as it is generated using straightforward methods and is unlikely to conform to any real-life distribution. Still, it serves as a valuable resource for testing purposes when authentic data is unavailable.
# # Install Faker
#
# The Python `faker` module needs to be installed. Note that on Google Colab you can use `!pip` as well as just `pip` (no exclamation mark).
# In[1]:
get_ipython().system('pip install faker')
# # Generate a Pandas dataframe with fake data
# Import `Faker` and set a random seed ($42$).
# In[2]:
from faker import Faker
# Set the seed value of the shared `random.Random` object
# across all internal generators that will ever be created
Faker.seed(42)
# `fake` is a fake data generator with the `de_DE` locale.
# In[3]:
fake = Faker('de_DE')
fake.seed_locale('de_DE', 42)
# Creates and seeds a unique `random.Random` object for
# each internal generator of this `Faker` instance
fake.seed_instance(42)
# With `fake` you can generate fake data, such as name, email, etc.
# In[4]:
print(f"A fake name: {fake.name()}")
print(f"A fake email: {fake.email()}")
# Import Pandas to save data into a dataframe
# In[5]:
# true if running on Google Colab
import sys
IN_COLAB = 'google.colab' in sys.modules
if not IN_COLAB:
    get_ipython().system('pip install pandas==1.5.3')
import pandas as pd
# The function `create_row_faker` creates one row of fake data. Here we choose to generate a row containing the following fields:
# - `fake.name()`
# - `fake.postcode()`
# - `fake.email()`
# - `fake.country()`.
# In[6]:
def create_row_faker(num=1):
    fake = Faker('de_DE')
    fake.seed_locale('de_DE', 42)
    fake.seed_instance(42)
    output = [{"name": fake.name(),
               "age": fake.random_int(0, 100),
               "postcode": fake.postcode(),
               "email": fake.email(),
               "nationality": fake.country(),
               } for x in range(num)]
    return output
# Generate a single row
# In[7]:
create_row_faker()
# Generate `n=3` rows
# In[8]:
create_row_faker(3)
# Generate a dataframe `df_fake` of 5000 rows using `create_row_faker`.
#
# We're using the _cell magic_ `%%time` to time the operation.
# In[9]:
get_ipython().run_cell_magic('time', '', 'df_fake = pd.DataFrame(create_row_faker(5000))\n')
# View dataframe
# In[10]:
df_fake
# For more fake data generators see Faker's [standard providers](https://faker.readthedocs.io/en/master/providers.html#standard-providers) as well as [community providers](https://faker.readthedocs.io/en/master/communityproviders.html#community-providers).
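# If the built-in providers are not enough, you can also register a custom provider on the `fake` instance. Below is a minimal sketch: the `DepartmentProvider` class and its department list are made up purely for illustration.
# In[ ]:
from faker.providers import BaseProvider
class DepartmentProvider(BaseProvider):
    """Toy custom provider returning a random department name."""
    departments = ('Sales', 'Engineering', 'Marketing', 'HR')
    def department(self):
        # random_element is inherited from BaseProvider
        return self.random_element(self.departments)
fake.add_provider(DepartmentProvider)
print(f"A fake department: {fake.department()}")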
# # Generate PySpark dataframe with fake data
# Install PySpark.
# In[11]:
get_ipython().system('pip install pyspark')
# In[12]:
from pyspark.sql import SparkSession
spark = SparkSession \
    .builder \
    .appName("Faker demo") \
    .getOrCreate()
# In[13]:
df = spark.createDataFrame(create_row_faker(5000))
# Calling `createDataFrame` on a list of plain dicts may emit a deprecation warning about inferring the schema from a dict. To avoid it, either use [pyspark.sql.Row](https://spark.apache.org/docs/latest/api/python/pyspark.sql.html#pyspark.sql.Row) and let Spark infer the datatypes, or create a schema for the dataframe specifying the datatypes of all fields (here's the list of all [datatypes](http://spark.apache.org/docs/latest/api/python/pyspark.sql.html?highlight=types#module-pyspark.sql.types)).
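# For completeness, here is a minimal sketch of the first option, wrapping each generated dict in a `pyspark.sql.Row` and letting Spark infer the types:
# In[ ]:
from pyspark.sql import Row
rows = [Row(**r) for r in create_row_faker(5)]
df_rows = spark.createDataFrame(rows)
df_rows.printSchema()
# The rest of the notebook follows the second option and defines an explicit schema.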
# In[14]:
from pyspark.sql.types import *
schema = StructType([StructField('name', StringType()),
                     StructField('age', IntegerType()),
                     StructField('postcode', StringType()),
                     StructField('email', StringType()),
                     StructField('nationality', StringType())])
# In[15]:
df = spark.createDataFrame(create_row_faker(5000), schema)
# In[16]:
df.printSchema()
# Let's generate some more data (a dataframe with $5\cdot10^4$ rows). The dataframe will be partitioned by Spark.
# In[17]:
get_ipython().run_cell_magic('time', '', 'n = 5*10**4\ndf = spark.createDataFrame(create_row_faker(n), schema)\n')
# In[18]:
df.show(10, truncate=False)
# That took quite a while (about 4 seconds for 50,000 rows)!
#
# Can we do better?
# The function `create_row_faker()` builds the entire list of rows in memory before handing it to Spark. This is not efficient; what we need is a _generator_ instead.
# In[19]:
d = create_row_faker(5)
# what type is d?
type(d)
# Let us turn `d` into a generator
# In[20]:
d = ({"name": fake.name(),
"age": fake.random_int(0, 100),
"postcode": fake.postcode(),
"email": fake.email(),
"nationality": fake.country()} for i in range(5))
# what type is d?
type(d)
# In[21]:
get_ipython().run_cell_magic('time', '', 'n = 5*10**4\nfake = Faker(\'de_DE\')\nfake.seed_locale(\'de_DE\', 42)\nfake.seed_instance(42)\nd = ({"name": fake.name(),\n "age": fake.random_int(0, 100),\n "postcode": fake.postcode(),\n "email": fake.email(),\n "nationality": fake.country()}\n for i in range(n))\ndf = spark.createDataFrame(d, schema)\n')
# In[22]:
df.show(10, truncate=False)
# This wasn't any faster.
#
# Let us look at how we can leverage Spark's parallelism to generate dataframes and speed up the process.
# ## A more efficient way to generate a large amount of records
#
# We are going to use Spark's RDD API and the `parallelize` function. To do this, we first need to extract the Spark _context_ from the current session.
# In[23]:
sc = spark.sparkContext
sc
# To decide on the number of partitions, we look at the number of (virtual) CPUs on the local machine. On a cluster you would have more CPUs spread across multiple nodes, but that is not the case here.
#
# The standard Google Colab virtual machine has $2$ virtual CPUs (one physical core with two threads), so that is the maximum parallelism you can achieve.
#
# **Note:**
#
# CPUs = threads per core × cores per socket × sockets
# In[24]:
get_ipython().system("lscpu | grep -E '^Thread|^Core|^Socket|^CPU\\('")
# Due to the limited number of CPUs on this machine, we'll use only $2$ partitions. Even so, the data generation time improves dramatically!
# In[25]:
get_ipython().run_cell_magic('time', '', 'n = 5*10**4\nnum_partitions = 2\n# Create an empty RDD with the specified number of partitions\nempty_rdd = sc.parallelize(range(num_partitions), num_partitions)\n# Define a function that will run on each partition to generate the fake data\ndef generate_fake_data(_):\n fake = Faker(\'de_DE\') # Create a new Faker instance per partition\n fake.seed_locale(\'de_DE\', 42)\n fake.seed_instance(42)\n for _ in range(n // num_partitions): # Divide work across partitions\n yield {\n "name": fake.name(),\n "age": fake.random_int(0, 100),\n "postcode": fake.postcode(),\n "email": fake.email(),\n "nationality": fake.country()\n }\n\n# Use mapPartitions to generate fake data for each partition\nrdd = empty_rdd.mapPartitions(generate_fake_data)\n# Convert the RDD to a DataFrame\ndf = rdd.toDF()\n')
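# **Note:** every partition above is seeded with the same value ($42$), so the two partitions produce identical rows. If that matters for your tests, one simple variation (sketched below, not the only way) is to derive each partition's seed from its index with `mapPartitionsWithIndex`:
# In[ ]:
def generate_fake_data_indexed(index, _):
    # One Faker instance per partition, seeded with the partition index
    fake = Faker('de_DE')
    fake.seed_instance(42 + index)
    for _ in range(n // num_partitions):
        yield {
            "name": fake.name(),
            "age": fake.random_int(0, 100),
            "postcode": fake.postcode(),
            "email": fake.email(),
            "nationality": fake.country()
        }
rdd_indexed = empty_rdd.mapPartitionsWithIndex(generate_fake_data_indexed)
df_indexed = rdd_indexed.toDF(schema)
df_indexed.show(5, truncate=False)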
# I'm convinced that the reason everyone always looks at the first $5$ rows in Spark's RDDs is an homage to the classic jazz piece 🎷🎶.
# In[26]:
rdd.take(5)
# In[27]:
df.show()
# # Filter and aggregate with PySpark
# Show the first five records in the dataframe of fake data.
# In[28]:
df.show(n=5, truncate=False)
# Do some data aggregation:
# - group by postcode
# - count the number of persons and the average age for each postcode
# - filter out postcodes with less than 4 persons
# - sort by average age descending
# - show the first 5 entries
# In[29]:
import pyspark.sql.functions as F
df.groupBy('postcode') \
  .agg(F.count('postcode').alias('Count'), F.round(F.avg('age'), 2).alias('Average age')) \
  .filter('Count>3') \
  .orderBy('Average age', ascending=False) \
  .show(5)
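# The same aggregation can also be written in plain SQL by registering the dataframe as a temporary view (the view name `people` below is arbitrary):
# In[ ]:
df.createOrReplaceTempView("people")
spark.sql("""
    SELECT postcode,
           COUNT(*)           AS Count,
           ROUND(AVG(age), 2) AS `Average age`
    FROM people
    GROUP BY postcode
    HAVING COUNT(*) > 3
    ORDER BY `Average age` DESC
""").show(5)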
# Postcode $18029$ has the highest average age ($91.75$). Show all entries for postcode $18029$ using `filter`.
# In[30]:
df.filter('postcode==18029').show(truncate=False)
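# The same filter written with a column expression instead of a SQL string (purely a matter of style):
# In[ ]:
df.filter(F.col('postcode') == '18029').show(truncate=False)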
# # Another example with multiple locales and weights
#
# We are going to use multiple locales with weights (following the [examples](https://faker.readthedocs.io/en/master/fakerclass.html#examples) in the documentation).
#
# Here's the [list of all available locales](https://faker.readthedocs.io/en/master/locales.html).
# In[31]:
from faker import Faker
# set a seed for the random generator
Faker.seed(0)
# Generate data with locales `de_DE` and `de_AT` with weights respectively $5$ and $2$.
#
# The distribution of locales will be:
# - `de_DE` - $71.43\%$ of the time ($5 / (5+2)$)
# - `de_AT` - $28.57\%$ of the time ($2 / (5+2)$)
#
# In[32]:
from collections import OrderedDict
locales = OrderedDict([
('de_DE', 5),
('de_AT', 2),
])
fake = Faker(locales)
fake.seed_instance(42)
fake.locales
# In[33]:
fake.seed_locale('de_DE', 0)
fake.seed_locale('de_AT', 0)
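# In multi-locale mode the proxy also lets you address a single locale directly, for example:
# In[ ]:
print(f"A German name: {fake['de_DE'].name()}")
print(f"An Austrian city: {fake['de_AT'].city()}")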
# In[34]:
fake.profile(fields=['name', 'birthdate', 'sex', 'blood_group',
'mail', 'current_location'])
# In[35]:
from pyspark.sql.types import *
location = StructField('current_location',
                       StructType([StructField('lat', DecimalType()),
                                   StructField('lon', DecimalType())])
                       )
schema = StructType([StructField('name', StringType()),
                     StructField('birthdate', DateType()),
                     StructField('sex', StringType()),
                     StructField('blood_group', StringType()),
                     StructField('mail', StringType()),
                     location
                     ])
# In[36]:
fake.profile(fields=['name', 'birthdate', 'sex', 'blood_group',
'mail', 'current_location'])
# In[37]:
from pyspark.sql import SparkSession
spark = SparkSession \
    .builder \
    .appName("Faker demo - part 2") \
    .getOrCreate()
# Create a dataframe with $5\cdot10^3$ rows.
# In[38]:
get_ipython().run_cell_magic('time', '', "n = 5*10**3\nd = (fake.profile(fields=['name', 'birthdate', 'sex', 'blood_group',\n 'mail', 'current_location'])\n for i in range(n))\ndf = spark.createDataFrame(d, schema)\n")
# In[39]:
df.printSchema()
# Note how the `current_location` field (defined above as `location`) represents a _tuple_ data structure (a `StructType` of `StructField`s).
# In[40]:
df.show(n=10, truncate=False)
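# Nested fields of the `current_location` struct can be selected with dot notation, for example:
# In[ ]:
df.select('name', 'current_location.lat', 'current_location.lon').show(5, truncate=False)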
# # Save to Parquet
# [Write to a Parquet file](https://spark.apache.org/docs/latest/api/python/pyspark.sql.html?highlight=write#pyspark.sql.DataFrameWriter.parquet) ([Parquet](http://parquet.apache.org/) is a compressed, efficient columnar data format compatible with all frameworks in the Hadoop ecosystem):
# In[41]:
df.write.mode("overwrite").parquet("fakedata.parquet")
# Check the size of the Parquet file (it is actually a directory containing the partitions):
# In[42]:
get_ipython().system('du -h fakedata.parquet')
# In[43]:
get_ipython().system('ls -lh fakedata.parquet')
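# As a quick sanity check, read the file back and verify the row count:
# In[ ]:
df_check = spark.read.parquet("fakedata.parquet")
print(f"Rows read back from Parquet: {df_check.count()}")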
# # Stop Spark session
# Don't forget to close the Spark session when you're done!
# ## Why you should stop your Spark session
#
# Even when no jobs are running, the Spark session holds memory resources that are released only when the session is properly stopped.
# In[44]:
# Function to check memory usage
import subprocess
def get_memory_usage_ratio():
    # Run the 'free -h' command
    result = subprocess.run(['free', '-h'], stdout=subprocess.PIPE, text=True)
    # Parse the output
    lines = result.stdout.splitlines()
    # Initialize used and total memory
    used_memory = None
    total_memory = None
    # The second line contains the memory information
    if len(lines) > 1:
        # Split the line into parts
        memory_parts = lines[1].split()
        total_memory = memory_parts[1]  # Total memory
        used_memory = memory_parts[2]   # Used memory
    return used_memory, total_memory
# Stop the session and compare.
# In[45]:
# Check memory usage before stopping the Spark session
used_memory, total_memory = get_memory_usage_ratio()
print(f"Memory used before stopping Spark session: {used_memory}")
print(f"Total Memory: {total_memory}")
# In[46]:
# Stop the Spark session
spark.stop()
# Check memory usage after stopping the Spark session
used_memory, total_memory = get_memory_usage_ratio()
print(f"Memory used after stopping Spark session: {used_memory}")
print(f"Total Memory: {total_memory}")
# The amount of memory released may not be impressive in this case, but holding onto unnecessary resources is inefficient. Also, memory waste can add up quickly when multiple sessions are running.
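# If you prefer not to manage this by hand, PySpark's `SparkSession` can also be used as a context manager (its `__exit__` calls `stop()`), so the session is closed automatically when the block ends. A minimal sketch (the app name here is just an example):
# In[ ]:
from pyspark.sql import SparkSession
with SparkSession.builder.appName("Faker demo - scoped").getOrCreate() as scoped_spark:
    scoped_spark.range(5).show()
# Once the block exits, scoped_spark is stopped and its resources are released.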