Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit c26d625

Browse files
author
Kubernetes Submit Queue
authored
Merge pull request kubernetes#37313 from roberthbailey/automated-cherry-pick-of-#36623-upstream-release-1.3
Automatic merge from submit-queue Automated cherry pick of kubernetes#36623 Cherry pick of kubernetes#36623 on release-1.3. kubernetes#36623: Use generous limits in the resource usage tracking tests
2 parents 2f65865 + edd0780 commit c26d625

File tree

1 file changed

+15
-23
lines changed

1 file changed

+15
-23
lines changed

test/e2e/kubelet_perf.go

Lines changed: 15 additions & 23 deletions
Original file line number | Diff line number | Diff line change
@@ -226,43 +226,35 @@ var _ = framework.KubeDescribe("Kubelet [Serial] [Slow]", func() {
226226
// initialization. This *noise* is obvious when N is small. We
227227
// deliberately set higher resource usage limits to account for the
228228
// noise.
229+
//
230+
// We set all resource limits generously because this test is mainly
231+
// used to catch resource leaks in the soak cluster. For tracking
232+
// kubelet/runtime resource usage, please see the node e2e benchmark
233+
// dashboard. http://node-perf-dash.k8s.io/
234+
//
235+
// TODO(#36621): Deprecate this test once we have a node e2e soak
236+
// cluster.
229237
rTests := []resourceTest{
230238
{
231239
podsPerNode: 0,
232240
cpuLimits: framework.ContainersCPUSummary{
233-
stats.SystemContainerKubelet: {0.50: 0.06, 0.95: 0.08},
234-
stats.SystemContainerRuntime: {0.50: 0.05, 0.95: 0.06},
241+
stats.SystemContainerKubelet: {0.50: 0.10, 0.95: 0.20},
242+
stats.SystemContainerRuntime: {0.50: 0.10, 0.95: 0.20},
235243
},
236-
// We set the memory limits generously because the distribution
237-
// of the addon pods affect the memory usage on each node.
238244
memLimits: framework.ResourceUsagePerContainer{
239245
stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 70 * 1024 * 1024},
240-
stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 85 * 1024 * 1024},
246+
// The detail can be found at https://github.com/kubernetes/kubernetes/issues/28384#issuecomment-244158892
247+
stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 125 * 1024 * 1024},
241248
},
242249
},
243250
{
244-
podsPerNode: 35,
245251
cpuLimits: framework.ContainersCPUSummary{
246-
stats.SystemContainerKubelet: {0.50: 0.12, 0.95: 0.14},
247-
stats.SystemContainerRuntime: {0.50: 0.05, 0.95: 0.07},
248-
},
249-
// We set the memory limits generously because the distribution
250-
// of the addon pods affect the memory usage on each node.
251-
memLimits: framework.ResourceUsagePerContainer{
252-
stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 70 * 1024 * 1024},
253-
stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 150 * 1024 * 1024},
254-
},
255-
},
256-
{
257-
cpuLimits: framework.ContainersCPUSummary{
258-
stats.SystemContainerKubelet: {0.50: 0.17, 0.95: 0.22},
259-
stats.SystemContainerRuntime: {0.50: 0.06, 0.95: 0.09},
252+
stats.SystemContainerKubelet: {0.50: 0.35, 0.95: 0.50},
253+
stats.SystemContainerRuntime: {0.50: 0.10, 0.95: 0.50},
260254
},
261255
podsPerNode: 100,
262-
// We set the memory limits generously because the distribution
263-
// of the addon pods affect the memory usage on each node.
264256
memLimits: framework.ResourceUsagePerContainer{
265-
stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 80 * 1024 * 1024},
257+
stats.SystemContainerKubelet: &framework.ContainerResourceUsage{MemoryRSSInBytes: 100 * 1024 * 1024},
266258
stats.SystemContainerRuntime: &framework.ContainerResourceUsage{MemoryRSSInBytes: 300 * 1024 * 1024},
267259
},
268260
},

0 commit comments

Comments (0)