Commit 6b6da95
Static CPU management policy alongside InPlacePodVerticalScaling
1 parent: 7140b49

12 files changed: +2798 −82 lines changed

‎pkg/kubelet/cm/cpumanager/cpu_assignment.go

+63 −11 (63 additions, 11 deletions)
@@ -294,14 +294,51 @@ type cpuAccumulator struct {
 	availableCPUSorter availableCPUSorter
 }
 
-func newCPUAccumulator(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuSortingStrategy CPUSortingStrategy) *cpuAccumulator {
+func newCPUAccumulator(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuSortingStrategy CPUSortingStrategy, reusableCPUsForResize *cpuset.CPUSet, mustKeepCPUsForScaleDown *cpuset.CPUSet) *cpuAccumulator {
 	acc := &cpuAccumulator{
 		topo:          topo,
 		details:       topo.CPUDetails.KeepOnly(availableCPUs),
 		numCPUsNeeded: numCPUs,
 		result:        cpuset.New(),
 	}
 
+	if reusableCPUsForResize != nil {
+		if !reusableCPUsForResize.IsEmpty() {
+			// Increase of CPU resources (scale up):
+			// take the existing allocated CPUs first.
+			if numCPUs > reusableCPUsForResize.Size() {
+				// scale up ...
+				acc.take(reusableCPUsForResize.Clone())
+			}
+
+			// Decrease of CPU resources (scale down):
+			// take the delta from the allocated CPUs. If mustKeepCPUsForScaleDown
+			// is not nil, keep exactly those CPUs. If it is nil,
+			// take the delta starting from the lowest CoreID (TODO esotsal, perhaps not needed).
+			if numCPUs < reusableCPUsForResize.Size() {
+				if mustKeepCPUsForScaleDown != nil {
+					// The CPUs to keep during scale down are given explicitly (this requires
+					// an addition in container[].resources ... which could possibly be
+					// patched? esotsal note: this means modifying API code).
+					if !(mustKeepCPUsForScaleDown.Intersection(reusableCPUsForResize.Clone())).IsEmpty() {
+						acc.take(mustKeepCPUsForScaleDown.Clone())
+					} else {
+						return acc
+					}
+				}
+			}
+
+			if numCPUs == reusableCPUsForResize.Size() {
+				// nothing to do, return as is
+				acc.take(reusableCPUsForResize.Clone())
+				return acc
+			}
+		}
+	}
+
 	if topo.NumSockets >= topo.NumNUMANodes {
 		acc.numaOrSocketsFirst = &numaFirst{acc}
 	} else {
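
The new block above reuses a container's existing allocation during an in-place resize, and it only branches on how the requested CPU count compares with the previously allocated set. Below is a minimal standalone sketch of that decision logic using the same k8s.io/utils/cpuset operations; the concrete CPU ids and counts are illustrative assumptions, not values taken from the commit.

package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

func main() {
	// CPUs already assigned to the container before the resize (the "reusable" set).
	reusable := cpuset.New(0, 1, 2, 3)

	// Scale up: the request (6) exceeds the reusable set (4), so the reusable CPUs
	// are taken first and only the remainder is accumulated from the free pool.
	numCPUs := 6
	if numCPUs > reusable.Size() {
		fmt.Printf("scale up: reuse %s, still need %d more\n", reusable, numCPUs-reusable.Size())
	}

	// Scale down: the request (2) is smaller, and the caller pins which CPUs must
	// survive; they are honoured only if they overlap the reusable set.
	numCPUs = 2
	mustKeep := cpuset.New(2, 3)
	if numCPUs < reusable.Size() && !mustKeep.Intersection(reusable).IsEmpty() {
		fmt.Printf("scale down: keep %s\n", mustKeep)
	}

	// No change: the request equals the current allocation and it is returned as is.
	numCPUs = 4
	if numCPUs == reusable.Size() {
		fmt.Printf("no resize: keep %s\n", reusable)
	}
}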
@@ -750,15 +787,23 @@ func (a *cpuAccumulator) iterateCombinations(n []int, k int, f func([]int) LoopC
 // the least amount of free CPUs to the one with the highest amount of free CPUs (i.e. in ascending
 // order of free CPUs). For any NUMA node, the cores are selected from the ones in the socket with
 // the least amount of free CPUs to the one with the highest amount of free CPUs.
-func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuSortingStrategy CPUSortingStrategy, preferAlignByUncoreCache bool) (cpuset.CPUSet, error) {
-	acc := newCPUAccumulator(topo, availableCPUs, numCPUs, cpuSortingStrategy)
+func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuSortingStrategy CPUSortingStrategy, preferAlignByUncoreCache bool, reusableCPUsForResize *cpuset.CPUSet, mustKeepCPUsForScaleDown *cpuset.CPUSet) (cpuset.CPUSet, error) {
+
+	// If the set of CPUs requested to be retained is not a subset
+	// of the reusable CPUs, fail early.
+	if reusableCPUsForResize != nil && mustKeepCPUsForScaleDown != nil {
+		if (mustKeepCPUsForScaleDown.Intersection(reusableCPUsForResize.Clone())).IsEmpty() {
+			return cpuset.New(), fmt.Errorf("requested CPUs to be retained %s are not a subset of reusable CPUs %s", mustKeepCPUsForScaleDown.String(), reusableCPUsForResize.String())
+		}
+	}
+
+	acc := newCPUAccumulator(topo, availableCPUs, numCPUs, cpuSortingStrategy, reusableCPUsForResize, mustKeepCPUsForScaleDown)
 	if acc.isSatisfied() {
 		return acc.result, nil
 	}
 	if acc.isFailed() {
 		return cpuset.New(), fmt.Errorf("not enough cpus available to satisfy request: requested=%d, available=%d", numCPUs, availableCPUs.Size())
 	}
-
 	// Algorithm: topology-aware best-fit
 	// 1. Acquire whole NUMA nodes and sockets, if available and the container
 	//    requires at least a NUMA node or socket's-worth of CPUs. If NUMA
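
The guard added at the top of takeByTopologyNUMAPacked rejects a retained set that does not overlap the reusable set before any accumulation happens. As written, the condition checks for a non-empty intersection rather than a strict subset, matching the check inside newCPUAccumulator. A small sketch of the same condition in isolation follows; validateRetainedCPUs and the CPU ids are hypothetical and not part of the commit.

package main

import (
	"fmt"

	"k8s.io/utils/cpuset"
)

// validateRetainedCPUs mirrors the fail-early guard: the CPUs requested to be
// retained must overlap the reusable (already owned) set, otherwise error out.
func validateRetainedCPUs(reusable, mustKeep cpuset.CPUSet) error {
	if mustKeep.Intersection(reusable).IsEmpty() {
		return fmt.Errorf("requested CPUs to be retained %s are not a subset of reusable CPUs %s", mustKeep, reusable)
	}
	return nil
}

func main() {
	reusable := cpuset.New(4, 5, 6, 7)
	fmt.Println(validateRetainedCPUs(reusable, cpuset.New(6, 7)))   // <nil>: overlap exists
	fmt.Println(validateRetainedCPUs(reusable, cpuset.New(10, 11))) // error: disjoint sets
}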
@@ -867,25 +912,32 @@ func takeByTopologyNUMAPacked(topo *topology.CPUTopology, availableCPUs cpuset.C
 // of size 'cpuGroupSize' according to the algorithm described above. This is
 // important, for example, to ensure that all CPUs (i.e. all hyperthreads) from
 // a single core are allocated together.
-func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuGroupSize int, cpuSortingStrategy CPUSortingStrategy) (cpuset.CPUSet, error) {
+func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpuset.CPUSet, numCPUs int, cpuGroupSize int, cpuSortingStrategy CPUSortingStrategy, reusableCPUsForResize *cpuset.CPUSet, mustKeepCPUsForScaleDown *cpuset.CPUSet) (cpuset.CPUSet, error) {
 	// If the number of CPUs requested cannot be handed out in chunks of
 	// 'cpuGroupSize', then we just call out the packing algorithm since we
 	// can't distribute CPUs in this chunk size.
 	// PreferAlignByUncoreCache feature not implemented here yet and set to false.
 	// Support for PreferAlignByUncoreCache to be done at beta release.
 	if (numCPUs % cpuGroupSize) != 0 {
-		return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy, false)
+		return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy, false, reusableCPUsForResize, mustKeepCPUsForScaleDown)
+	}
+
+	// If the set of CPUs requested to be retained is not a subset
+	// of the reusable CPUs, fail early.
+	if reusableCPUsForResize != nil && mustKeepCPUsForScaleDown != nil {
+		if (mustKeepCPUsForScaleDown.Intersection(reusableCPUsForResize.Clone())).IsEmpty() {
+			return cpuset.New(), fmt.Errorf("requested CPUs to be retained %s are not a subset of reusable CPUs %s", mustKeepCPUsForScaleDown.String(), reusableCPUsForResize.String())
+		}
 	}
 
 	// Otherwise build an accumulator to start allocating CPUs from.
-	acc := newCPUAccumulator(topo, availableCPUs, numCPUs, cpuSortingStrategy)
+	acc := newCPUAccumulator(topo, availableCPUs, numCPUs, cpuSortingStrategy, reusableCPUsForResize, mustKeepCPUsForScaleDown)
 	if acc.isSatisfied() {
 		return acc.result, nil
 	}
 	if acc.isFailed() {
 		return cpuset.New(), fmt.Errorf("not enough cpus available to satisfy request: requested=%d, available=%d", numCPUs, availableCPUs.Size())
 	}
-
 	// Get the list of NUMA nodes represented by the set of CPUs in 'availableCPUs'.
 	numas := acc.sortAvailableNUMANodes()
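
In the distributed variant above, the early cpuGroupSize check still routes indivisible requests to the packed allocator, now forwarding the two resize arguments unchanged: for example, a request for 6 CPUs with cpuGroupSize 4 cannot be split into whole groups (6 % 4 = 2), so it falls through to takeByTopologyNUMAPacked together with reusableCPUsForResize and mustKeepCPUsForScaleDown.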

@@ -1057,7 +1109,7 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
 	// size 'cpuGroupSize' from 'bestCombo'.
 	distribution := (numCPUs / len(bestCombo) / cpuGroupSize) * cpuGroupSize
 	for _, numa := range bestCombo {
-		cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), distribution, cpuSortingStrategy, false)
+		cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), distribution, cpuSortingStrategy, false, reusableCPUsForResize, mustKeepCPUsForScaleDown)
 		acc.take(cpus)
 	}

@@ -1072,7 +1124,7 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
 		if acc.details.CPUsInNUMANodes(numa).Size() < cpuGroupSize {
 			continue
 		}
-		cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), cpuGroupSize, cpuSortingStrategy, false)
+		cpus, _ := takeByTopologyNUMAPacked(acc.topo, acc.details.CPUsInNUMANodes(numa), cpuGroupSize, cpuSortingStrategy, false, reusableCPUsForResize, mustKeepCPUsForScaleDown)
 		acc.take(cpus)
 		remainder -= cpuGroupSize
 	}
@@ -1096,5 +1148,5 @@ func takeByTopologyNUMADistributed(topo *topology.CPUTopology, availableCPUs cpu
 	// If we never found a combination of NUMA nodes that we could properly
 	// distribute CPUs across, fall back to the packing algorithm.
-	return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy, false)
+	return takeByTopologyNUMAPacked(topo, availableCPUs, numCPUs, cpuSortingStrategy, false, reusableCPUsForResize, mustKeepCPUsForScaleDown)
 }
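
All of the existing call sites updated in the test file below simply pass nil for the two new arguments, keeping today's behaviour. For illustration, a resize-aware caller could thread them through roughly as follows; allocateForResize is a hypothetical wrapper sketched here under that assumption, not a function introduced by this commit.

// allocateForResize is a hypothetical helper showing how a resize path might call
// the packed allocator: the CPUs the container already owns are offered back as
// reusable so they are preferred on scale up and trimmed from on scale down.
func allocateForResize(topo *topology.CPUTopology, free cpuset.CPUSet, current cpuset.CPUSet, requested int) (cpuset.CPUSet, error) {
	reusable := current.Clone()
	return takeByTopologyNUMAPacked(topo, free.Union(current), requested,
		CPUSortingStrategyPacked, false, &reusable, nil)
}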

‎pkg/kubelet/cm/cpumanager/cpu_assignment_test.go

+9 −9 (9 additions, 9 deletions)
@@ -114,7 +114,7 @@ func TestCPUAccumulatorFreeSockets(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.description, func(t *testing.T) {
-			acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0, CPUSortingStrategyPacked)
+			acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0, CPUSortingStrategyPacked, nil, nil)
 			result := acc.freeSockets()
 			sort.Ints(result)
 			if !reflect.DeepEqual(result, tc.expect) {
@@ -214,7 +214,7 @@ func TestCPUAccumulatorFreeNUMANodes(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.description, func(t *testing.T) {
-			acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0, CPUSortingStrategyPacked)
+			acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0, CPUSortingStrategyPacked, nil, nil)
 			result := acc.freeNUMANodes()
 			if !reflect.DeepEqual(result, tc.expect) {
 				t.Errorf("expected %v to equal %v", result, tc.expect)
@@ -263,7 +263,7 @@ func TestCPUAccumulatorFreeSocketsAndNUMANodes(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.description, func(t *testing.T) {
-			acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0, CPUSortingStrategyPacked)
+			acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0, CPUSortingStrategyPacked, nil, nil)
 			resultNUMANodes := acc.freeNUMANodes()
 			if !reflect.DeepEqual(resultNUMANodes, tc.expectNUMANodes) {
 				t.Errorf("expected NUMA Nodes %v to equal %v", resultNUMANodes, tc.expectNUMANodes)
@@ -335,7 +335,7 @@ func TestCPUAccumulatorFreeCores(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.description, func(t *testing.T) {
-			acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0, CPUSortingStrategyPacked)
+			acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0, CPUSortingStrategyPacked, nil, nil)
 			result := acc.freeCores()
 			if !reflect.DeepEqual(result, tc.expect) {
 				t.Errorf("expected %v to equal %v", result, tc.expect)
@@ -391,7 +391,7 @@ func TestCPUAccumulatorFreeCPUs(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.description, func(t *testing.T) {
-			acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0, CPUSortingStrategyPacked)
+			acc := newCPUAccumulator(tc.topo, tc.availableCPUs, 0, CPUSortingStrategyPacked, nil, nil)
 			result := acc.freeCPUs()
 			if !reflect.DeepEqual(result, tc.expect) {
 				t.Errorf("expected %v to equal %v", result, tc.expect)
@@ -477,7 +477,7 @@ func TestCPUAccumulatorTake(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.description, func(t *testing.T) {
-			acc := newCPUAccumulator(tc.topo, tc.availableCPUs, tc.numCPUs, CPUSortingStrategyPacked)
+			acc := newCPUAccumulator(tc.topo, tc.availableCPUs, tc.numCPUs, CPUSortingStrategyPacked, nil, nil)
 			totalTaken := 0
 			for _, cpus := range tc.takeCPUs {
 				acc.take(cpus)
@@ -750,7 +750,7 @@ func TestTakeByTopologyNUMAPacked(t *testing.T) {
 				strategy = CPUSortingStrategySpread
 			}
 
-			result, err := takeByTopologyNUMAPacked(tc.topo, tc.availableCPUs, tc.numCPUs, strategy, tc.opts.PreferAlignByUncoreCacheOption)
+			result, err := takeByTopologyNUMAPacked(tc.topo, tc.availableCPUs, tc.numCPUs, strategy, tc.opts.PreferAlignByUncoreCacheOption, nil, nil)
 			if tc.expErr != "" && err != nil && err.Error() != tc.expErr {
 				t.Errorf("expected error to be [%v] but it was [%v]", tc.expErr, err)
 			}
@@ -851,7 +851,7 @@ func TestTakeByTopologyWithSpreadPhysicalCPUsPreferredOption(t *testing.T) {
 			if tc.opts.DistributeCPUsAcrossCores {
 				strategy = CPUSortingStrategySpread
 			}
-			result, err := takeByTopologyNUMAPacked(tc.topo, tc.availableCPUs, tc.numCPUs, strategy, tc.opts.PreferAlignByUncoreCacheOption)
+			result, err := takeByTopologyNUMAPacked(tc.topo, tc.availableCPUs, tc.numCPUs, strategy, tc.opts.PreferAlignByUncoreCacheOption, nil, nil)
 			if tc.expErr != "" && err.Error() != tc.expErr {
 				t.Errorf("testCase %q failed, expected error to be [%v] but it was [%v]", tc.description, tc.expErr, err)
 			}
@@ -1053,7 +1053,7 @@ func TestTakeByTopologyNUMADistributed(t *testing.T) {
 
 	for _, tc := range testCases {
 		t.Run(tc.description, func(t *testing.T) {
-			result, err := takeByTopologyNUMADistributed(tc.topo, tc.availableCPUs, tc.numCPUs, tc.cpuGroupSize, CPUSortingStrategyPacked)
+			result, err := takeByTopologyNUMADistributed(tc.topo, tc.availableCPUs, tc.numCPUs, tc.cpuGroupSize, CPUSortingStrategyPacked, nil, nil)
 			if err != nil {
 				if tc.expErr == "" {
 					t.Errorf("unexpected error [%v]", err)
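
Beyond updating the existing callers with nil, nil, the new parameters could also be exercised directly. A hedged test sketch follows (not part of this commit); it reuses the existing topoSingleSocketHT fixture from this package, and the chosen CPU ids plus the expectation that reusable CPUs reappear in the result are assumptions based on the reuse logic above.

// TestTakeByTopologyNUMAPackedReusesCPUsOnScaleUp is a hypothetical test sketch:
// on scale up, the CPUs marked reusable should be part of the returned allocation.
func TestTakeByTopologyNUMAPackedReusesCPUsOnScaleUp(t *testing.T) {
	reusable := cpuset.New(0, 4)                    // assumed to be the container's current CPUs
	available := cpuset.New(0, 1, 2, 3, 4, 5, 6, 7) // all CPUs of topoSingleSocketHT

	result, err := takeByTopologyNUMAPacked(topoSingleSocketHT, available, 4,
		CPUSortingStrategyPacked, false, &reusable, nil)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	if !reusable.IsSubsetOf(result) {
		t.Errorf("expected reusable CPUs %s to be contained in result %s", reusable, result)
	}
	if result.Size() != 4 {
		t.Errorf("expected 4 CPUs, got %d", result.Size())
	}
}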

‎pkg/kubelet/cm/cpumanager/cpu_manager_test.go

+1 −0 (1 addition, 0 deletions)
@@ -167,6 +167,7 @@ func makePod(podUID, containerName, cpuRequest, cpuLimit string) *v1.Pod {
 	}
 
 	pod.UID = types.UID(podUID)
+	pod.Name = podUID
 	pod.Spec.Containers[0].Name = containerName
 
 	return pod
