Thanks for visiting codestin.com
Credit goes to github.com

Skip to content

Commit cfe4ca8

Browse files
authored
Merge pull request kubernetes#73934 from bsalamat/num_cpu
Use runtime.NumCPU() instead of a fixed value for parallel scheduler threads
2 parents f160356 + d0ebeef commit cfe4ca8

File tree

3 files changed

+9
-6
lines changed

3 files changed

+9
-6
lines changed

pkg/scheduler/algorithm/predicates/metadata.go

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@ package predicates
1919
import (
2020
"context"
2121
"fmt"
22+
"runtime"
2223
"sync"
2324

2425
"k8s.io/klog"
@@ -415,7 +416,7 @@ func getTPMapMatchingExistingAntiAffinity(pod *v1.Pod, nodeInfoMap map[string]*s
415416
appendTopologyPairsMaps(existingPodTopologyMaps)
416417
}
417418
}
418-
workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
419+
workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(allNodeNames), processNode)
419420
return topologyMaps, firstError
420421
}
421422

@@ -503,7 +504,7 @@ func getTPMapMatchingIncomingAffinityAntiAffinity(pod *v1.Pod, nodeInfoMap map[s
503504
appendResult(node.Name, nodeTopologyPairsAffinityPodsMaps, nodeTopologyPairsAntiAffinityPodsMaps)
504505
}
505506
}
506-
workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
507+
workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(allNodeNames), processNode)
507508
return topologyPairsAffinityPodsMaps, topologyPairsAntiAffinityPodsMaps, firstError
508509
}
509510

pkg/scheduler/algorithm/priorities/interpod_affinity.go

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -18,6 +18,7 @@ package priorities
1818

1919
import (
2020
"context"
21+
"runtime"
2122
"sync"
2223

2324
"k8s.io/api/core/v1"
@@ -211,7 +212,7 @@ func (ipa *InterPodAffinity) CalculateInterPodAffinityPriority(pod *v1.Pod, node
211212
}
212213
}
213214
}
214-
workqueue.ParallelizeUntil(context.TODO(), 16, len(allNodeNames), processNode)
215+
workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(allNodeNames), processNode)
215216
if pm.firstError != nil {
216217
return nil, pm.firstError
217218
}

pkg/scheduler/core/generic_scheduler.go

Lines changed: 4 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -20,6 +20,7 @@ import (
2020
"context"
2121
"fmt"
2222
"math"
23+
"runtime"
2324
"sort"
2425
"strings"
2526
"sync"
@@ -489,7 +490,7 @@ func (g *genericScheduler) findNodesThatFit(pod *v1.Pod, nodes []*v1.Node) ([]*v
489490

490491
// Stops searching for more nodes once the configured number of feasible nodes
491492
// are found.
492-
workqueue.ParallelizeUntil(ctx, 16, int(allNodes), checkNode)
493+
workqueue.ParallelizeUntil(ctx, runtime.NumCPU(), int(allNodes), checkNode)
493494

494495
filtered = filtered[:filteredLen]
495496
if len(errs) > 0 {
@@ -695,7 +696,7 @@ func PrioritizeNodes(
695696
}
696697
}
697698

698-
workqueue.ParallelizeUntil(context.TODO(), 16, len(nodes), func(index int) {
699+
workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(nodes), func(index int) {
699700
nodeInfo := nodeNameToInfo[nodes[index].Name]
700701
for i := range priorityConfigs {
701702
if priorityConfigs[i].Function != nil {
@@ -943,7 +944,7 @@ func selectNodesForPreemption(pod *v1.Pod,
943944
resultLock.Unlock()
944945
}
945946
}
946-
workqueue.ParallelizeUntil(context.TODO(), 16, len(potentialNodes), checkNode)
947+
workqueue.ParallelizeUntil(context.TODO(), runtime.NumCPU(), len(potentialNodes), checkNode)
947948
return nodeToVictims, nil
948949
}
949950

0 commit comments

Comments (0)