Thanks to visit codestin.com
Credit goes to github.com

Skip to content
Open
Changes from 1 commit
Commits
Show all changes
43 commits
Select commit Hold shift + click to select a range
1d5d128
Custom profiling commands & flamegraphs
oschaaf Apr 15, 2020
70b6e52
Docker linting, fix TODO
oschaaf Apr 16, 2020
93da236
Merge remote-tracking branch 'upstream/master' into profiling
oschaaf Apr 16, 2020
81d4d7a
Docker lint tweak
oschaaf Apr 16, 2020
ee62a80
Add perf label to flamegraph filename
oschaaf Apr 16, 2020
689021b
Deduplicate redundant functionality
oschaaf Apr 16, 2020
a3f5587
Merge remote-tracking branch 'upstream/master' into profiling
oschaaf Apr 17, 2020
bffad09
Dockerfile.perf linting: pin package versions
oschaaf Apr 17, 2020
6425935
bash linting: double quote arg
oschaaf Apr 17, 2020
81f4d4d
Add licence / copyright banner
oschaaf Apr 17, 2020
b37cea8
Python whitespace linting fix
oschaaf Apr 17, 2020
d5d5d59
Markdown linting fixes
oschaaf Apr 17, 2020
4871583
Merge remote-tracking branch 'upstream/master' into profiling
oschaaf Apr 18, 2020
eb6090f
Move profiling thread start into function
oschaaf Apr 20, 2020
6730880
Python linting fix
oschaaf Apr 20, 2020
5e5ce41
Small fixes
oschaaf Apr 27, 2020
f265cfd
Merge remote-tracking branch 'upstream/master' into profiling
oschaaf Apr 27, 2020
9e9320a
lint whitespace
oschaaf Apr 27, 2020
4dda5a9
Merge remote-tracking branch 'upstream/master' into profiling
oschaaf Apr 27, 2020
1377ee3
Merge remote-tracking branch 'upstream/master' into profiling
oschaaf Apr 28, 2020
2886e04
Merge remote-tracking branch 'upstream/master' into profiling
oschaaf Apr 29, 2020
fa42cab
Sync up with the latest changes
oschaaf Apr 29, 2020
680b10c
linting fixes + fix in fortio.yaml
oschaaf Apr 29, 2020
30342ff
Changes to minimize the diff
oschaaf Apr 29, 2020
0b9e851
Merge remote-tracking branch 'upstream/master' into profiling
oschaaf Apr 30, 2020
2fccdd6
Merge remote-tracking branch 'upstream/master' into profiling
oschaaf May 3, 2020
83a8e40
Tweak flamegraph file naming
oschaaf May 3, 2020
94f6120
Fix NH-mode --ingress option
oschaaf May 3, 2020
7f28c4c
Lint fix
oschaaf May 4, 2020
e274665
Small enhancements/fixes
oschaaf May 4, 2020
b96687e
Fix hang, improve error handling. Doc enhancements.
oschaaf May 5, 2020
17d136a
Merge remote-tracking branch 'upstream/master' into profiling
oschaaf May 5, 2020
aae1f19
Lint change in runner.py
oschaaf May 5, 2020
fdfe910
Flag for allowing short runs. Doc pagefault flamegraphing.
oschaaf May 5, 2020
47c63f5
runner.py: add --envoy_profiler option
oschaaf May 8, 2020
b2aa8f0
Lint fixes
oschaaf May 8, 2020
1c5a3dd
Merge remote-tracking branch 'upstream/master' into profiling
oschaaf May 8, 2020
b19423b
Markdown lint fixes
oschaaf May 8, 2020
70e8f5a
Add scrape annotations for prom. node exporter
oschaaf May 13, 2020
e641309
Network flakes in CI: add hard-coded single retry per test execution
oschaaf May 13, 2020
2d71dd1
Remove line of code for debugging
oschaaf May 13, 2020
bce6a4f
Tweaks for bleeding edge istio
oschaaf May 13, 2020
50a2a63
Merge remote-tracking branch 'upstream/master' into profiling
oschaaf May 24, 2020
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
Prev Previous commit
Next Next commit
Merge remote-tracking branch 'upstream/master' into profiling
Signed-off-by: Otto van der Schaaf <[email protected]>
  • Loading branch information
oschaaf committed Apr 17, 2020
commit a3f5587b9ccdd654c44339f97b4dbabae7960038
81 changes: 71 additions & 10 deletions perf/benchmark/runner/runner.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,7 +22,8 @@
import shlex
import uuid
import sys

import tempfile
import time
from subprocess import getoutput
from urllib.parse import urlparse
from threading import Thread
Expand Down Expand Up @@ -177,16 +178,16 @@ def compute_uri(self, svc, port_type):
sys.exit("invalid load generator %s, must be fortio or nighthawk", self.load_gen_type)

def nosidecar(self, load_gen_cmd, sidecar_mode):
return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.ip, "direct_port")
return load_gen_cmd + sidecar_mode + " " + self.compute_uri(self.server.ip, "direct_port")

def serversidecar(self, load_gen_cmd, sidecar_mode):
return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.ip, "port")
return load_gen_cmd + sidecar_mode + " " + self.compute_uri(self.server.ip, "port")

def clientsidecar(self, load_gen_cmd, sidecar_mode):
return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.labels["app"], "direct_port")
return load_gen_cmd + sidecar_mode + " " + self.compute_uri(self.server.labels["app"], "direct_port")

def bothsidecar(self, load_gen_cmd, sidecar_mode):
return load_gen_cmd + "_" + sidecar_mode + " " + self.compute_uri(self.server.labels["app"], "port")
return load_gen_cmd + sidecar_mode + " " + self.compute_uri(self.server.labels["app"], "port")

def ingress(self, load_gen_cmd):
url = urlparse(self.run_ingress)
Expand All @@ -200,9 +201,9 @@ def ingress(self, load_gen_cmd):
def execute_sidecar_mode(self, sidecar_mode, load_gen_type, load_gen_cmd, sidecar_mode_func, labels, perf_label_suffix):
print('-------------- Running in {sidecar_mode} mode --------------'.format(sidecar_mode=sidecar_mode))
if load_gen_type == "fortio":
kubectl_exec(self.client.name, sidecar_mode_func(load_gen_cmd, sidecar_mode))
kubectl_exec(self.client.name, sidecar_mode_func(load_gen_cmd, perf_label_suffix))
elif load_gen_type == "nighthawk":
run_nighthawk(self.client.name, sidecar_mode_func(load_gen_type, sidecar_mode), labels + "_" + sidecar_mode)
run_nighthawk(self.client.name, sidecar_mode_func(load_gen_cmd, perf_label_suffix), labels + perf_label_suffix)

def generate_test_labels(self, conn, qps, size):
size = size or self.size
Expand Down Expand Up @@ -304,6 +305,50 @@ def maybe_start_profiling_threads(self, labels, perf_label):

return threads

def generate_nighthawk_cmd(self, cpus, conn, qps, duration, labels):
    """Build the nighthawk_client command line for a benchmark run.

    Args:
        cpus: number of Nighthawk workers; acts as a load multiplier, so
            the per-worker connection and rps targets are divided by it.
        conn: total target connection count across all workers.
        qps: total target requests-per-second across all workers.
        duration: test duration passed to --duration.
        labels: label string appended as the final --label argument.

    Returns:
        The fully formatted nighthawk_client command string.
    """
    flags = [
        "nighthawk_client",
        "--concurrency {cpus}",
        "--output-format json",
        "--prefetch-connections",
        "--open-loop",
        "--jitter-uniform 0.0001s",
        "--experimental-h1-connection-reuse-strategy lru",
        "--experimental-h2-use-multiple-connections",
        "--nighthawk-service 127.0.0.1:{port_forward}",
        "--label Nighthawk",
        "--connections {conn}",
        "--rps {qps}",
        "--duration {duration}",
        "--request-header \"x-nighthawk-test-server-config: {{response_body_size:{size}}}\""
    ]

    # Our "gRPC" mode actually means:
    # - https (see get_protocol_uri_fragment())
    # - h2
    # - with long running connections
    # - Also transfer request body sized according to "size".
    if self.mode == "grpc":
        flags.append("--h2")
        if self.size:
            flags.append(
                "--request-header \"content-length: {size}\"")

    # Note: Labels is the last arg, and there's stuff depending on that.
    # watch out when moving it.
    flags.append("--label {labels}")

    # Each worker independently produces load, so the global qps/connection
    # targets are divided across the workers: the per-worker values summed
    # over all workers hit the requested global levels.
    return " ".join(flags).format(
        conn=round(conn / cpus),
        qps=round(qps / cpus),
        duration=duration,
        labels=labels,
        size=self.size,
        cpus=cpus,
        port_forward=NIGHTHAWK_GRPC_SERVICE_PORT_FORWARD)

def run(self, headers, conn, qps, size, duration):
labels = self.generate_test_labels(conn, qps, size)
Expand All @@ -317,7 +362,23 @@ def run(self, headers, conn, qps, size, duration):
cacert_arg = "-cacert {cacert_path}".format(cacert_path=self.cacert)

headers_cmd = self.generate_headers_cmd(headers)
fortio_cmd = self.generate_fortio_cmd(headers_cmd, conn, qps, duration, grpc, cacert_arg, labels)

if self.load_gen_type == "fortio":
load_gen_cmd = self.generate_fortio_cmd(headers_cmd, conn, qps, duration, grpc, cacert_arg, labels)
elif self.load_gen_type == "nighthawk":
# TODO(oschaaf): Figure out how to best determine the right concurrency for Nighthawk.
# Results seem to get very noisy as the number of workers increases, are the clients
# and running on separate sets of vCPU cores? nproc yields the same concurrency as goprocs
# use with the Fortio version.
# client_cpus = int(run_command_sync(
# "kubectl exec -n \"{ns}\" svc/fortioclient -c shell nproc".format(ns=NAMESPACE)))
# print("Client pod has {client_cpus} cpus".format(client_cpus=client_cpus))

# See the comment above, we restrict execution to a single nighthawk worker for
# now to avoid noise.
workers = 1
load_gen_cmd = self.generate_nighthawk_cmd(workers, conn, qps, duration, labels)

perf_label = ""
sidecar_mode = ""
sidecar_mode_func = None
Expand Down Expand Up @@ -351,9 +412,9 @@ def run(self, headers, conn, qps, size, duration):

if self.run_ingress:
print('-------------- Running in ingress mode --------------')
kubectl_exec(self.client.name, self.ingress(fortio_cmd))
kubectl_exec(self.client.name, self.ingress(load_gen_cmd))
else:
self.execute_sidecar_mode(sidecar_mode, self.load_gen_type, fortio_cmd, sidecar_mode_func, labels, perf_label)
self.execute_sidecar_mode(sidecar_mode, self.load_gen_type, load_gen_cmd, sidecar_mode_func, labels, perf_label)

if len(threads) > 0:
if self.custom_profiling_command:
Expand Down
You are viewing a condensed version of this merge commit. You can view the full changes here.