test: code coverage increase for kubelet/preemption · kubernetes/kubernetes@9b0d973

Commit 9b0d973
1 parent 6ed5b60

1 file changed: +196 -31 lines

pkg/kubelet/preemption/preemption_test.go (196 additions & 31 deletions)

@@ -26,6 +26,7 @@ import (
     "k8s.io/client-go/tools/record"
     kubeapi "k8s.io/kubernetes/pkg/apis/core"
     "k8s.io/kubernetes/pkg/apis/scheduling"
+    "k8s.io/kubernetes/pkg/kubelet/lifecycle"
 )
 
 const (
@@ -90,52 +91,92 @@ func getTestCriticalPodAdmissionHandler(podProvider *fakePodProvider, podKiller
     }
 }
 
-func TestEvictPodsToFreeRequests(t *testing.T) {
+func TestHandleAdmissionFailure(t *testing.T) {
     type testRun struct {
-        testName              string
-        isPodKillerWithError  bool
-        inputPods             []*v1.Pod
-        insufficientResources admissionRequirementList
-        expectErr             bool
-        expectedOutput        []*v1.Pod
+        testName             string
+        isPodKillerWithError bool
+        inputPods            []*v1.Pod
+        admitPodType         string
+        failReasons          []lifecycle.PredicateFailureReason
+        expectErr            bool
+        expectedOutput       []*v1.Pod
+        expectReasons        []lifecycle.PredicateFailureReason
     }
     allPods := getTestPods()
     runs := []testRun{
         {
-            testName:              "critical pods cannot be preempted",
-            isPodKillerWithError:  false,
-            inputPods:             []*v1.Pod{allPods[clusterCritical]},
-            insufficientResources: getAdmissionRequirementList(0, 0, 1),
-            expectErr:             true,
-            expectedOutput:        nil,
+            testName:             "critical pods cannot be preempted - no other failure reason",
+            isPodKillerWithError: false,
+            inputPods:            []*v1.Pod{allPods[clusterCritical]},
+            admitPodType:         clusterCritical,
+            failReasons:          getPredicateFailureReasons(0, 0, 1, false),
+            expectErr:            true,
+            expectedOutput:       nil,
+            expectReasons:        getPredicateFailureReasons(0, 0, 0, false),
         },
         {
-            testName:              "best effort pods are not preempted when attempting to free resources",
-            isPodKillerWithError:  false,
-            inputPods:             []*v1.Pod{allPods[bestEffort]},
-            insufficientResources: getAdmissionRequirementList(0, 1, 0),
-            expectErr:             true,
-            expectedOutput:        nil,
+            testName:             "non-critical pod should not trigger eviction - no other failure reason",
+            isPodKillerWithError: false,
+            inputPods:            []*v1.Pod{allPods[burstable]},
+            admitPodType:         guaranteed,
+            failReasons:          getPredicateFailureReasons(0, 1, 0, false),
+            expectErr:            false,
+            expectedOutput:       nil,
+            expectReasons:        getPredicateFailureReasons(0, 1, 0, false),
         },
         {
-            testName: "multiple pods evicted",
+            testName:             "best effort pods are not preempted when attempting to free resources - no other failure reason",
+            isPodKillerWithError: false,
+            inputPods:            []*v1.Pod{allPods[bestEffort]},
+            admitPodType:         clusterCritical,
+            failReasons:          getPredicateFailureReasons(0, 1, 0, false),
+            expectErr:            true,
+            expectedOutput:       nil,
+            expectReasons:        getPredicateFailureReasons(0, 0, 0, false),
+        },
+        {
+            testName:             "multiple pods evicted - no other failure reason",
             isPodKillerWithError: false,
             inputPods: []*v1.Pod{
                 allPods[clusterCritical], allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable],
                 allPods[guaranteed], allPods[highRequestGuaranteed]},
-            insufficientResources: getAdmissionRequirementList(0, 550, 0),
-            expectErr:             false,
-            expectedOutput:        []*v1.Pod{allPods[highRequestBurstable], allPods[highRequestGuaranteed]},
+            admitPodType:   clusterCritical,
+            failReasons:    getPredicateFailureReasons(0, 550, 0, false),
+            expectErr:      false,
+            expectedOutput: []*v1.Pod{allPods[highRequestBurstable], allPods[highRequestGuaranteed]},
+            expectReasons:  getPredicateFailureReasons(0, 0, 0, false),
         },
         {
-            testName:             "multiple pods with eviction error",
+            testName:             "multiple pods with eviction error - no other failure reason",
             isPodKillerWithError: true,
             inputPods: []*v1.Pod{
                 allPods[clusterCritical], allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable],
                 allPods[guaranteed], allPods[highRequestGuaranteed]},
-            insufficientResources: getAdmissionRequirementList(0, 550, 0),
-            expectErr:             false,
-            expectedOutput:        nil,
+            admitPodType:   clusterCritical,
+            failReasons:    getPredicateFailureReasons(0, 550, 0, false),
+            expectErr:      false,
+            expectedOutput: nil,
+            expectReasons:  getPredicateFailureReasons(0, 0, 0, false),
+        },
+        {
+            testName:             "non-critical pod should not trigger eviction - with other failure reason",
+            isPodKillerWithError: false,
+            inputPods:            []*v1.Pod{allPods[burstable]},
+            admitPodType:         guaranteed,
+            failReasons:          getPredicateFailureReasons(0, 1, 0, true),
+            expectErr:            false,
+            expectedOutput:       nil,
+            expectReasons:        getPredicateFailureReasons(0, 1, 0, true),
+        },
+        {
+            testName:             "critical pods cannot be preempted - with other failure reason",
+            isPodKillerWithError: false,
+            inputPods:            []*v1.Pod{allPods[clusterCritical]},
+            admitPodType:         clusterCritical,
+            failReasons:          getPredicateFailureReasons(0, 0, 1, true),
+            expectErr:            false,
+            expectedOutput:       nil,
+            expectReasons:        getPredicateFailureReasons(0, 0, 0, true),
         },
     }
     for _, r := range runs {
@@ -144,14 +185,23 @@ func TestEvictPodsToFreeRequests(t *testing.T) {
             podKiller := newFakePodKiller(r.isPodKillerWithError)
             criticalPodAdmissionHandler := getTestCriticalPodAdmissionHandler(podProvider, podKiller)
             podProvider.setPods(r.inputPods)
-            outErr := criticalPodAdmissionHandler.evictPodsToFreeRequests(allPods[clusterCritical], r.insufficientResources)
+            admitPodRef := allPods[r.admitPodType]
+            filteredReason, outErr := criticalPodAdmissionHandler.HandleAdmissionFailure(admitPodRef, r.failReasons)
             outputPods := podKiller.getKilledPods()
             if !r.expectErr && outErr != nil {
-                t.Errorf("evictPodsToFreeRequests returned an unexpected error during the %s test. Err: %v", r.testName, outErr)
+                t.Errorf("HandleAdmissionFailure returned an unexpected error during the %s test. Err: %v", r.testName, outErr)
             } else if r.expectErr && outErr == nil {
-                t.Errorf("evictPodsToFreeRequests expected an error but returned a successful output=%v during the %s test.", outputPods, r.testName)
+                t.Errorf("HandleAdmissionFailure expected an error but returned a successful output=%v during the %s test.", outputPods, r.testName)
             } else if !podListEqual(r.expectedOutput, outputPods) {
-                t.Errorf("evictPodsToFreeRequests expected %v but got %v during the %s test.", r.expectedOutput, outputPods, r.testName)
+                t.Errorf("HandleAdmissionFailure expected %v but got %v during the %s test.", r.expectedOutput, outputPods, r.testName)
+            }
+            if len(filteredReason) != len(r.expectReasons) {
+                t.Fatalf("expect reasons %v, got reasons %v", r.expectReasons, filteredReason)
+            }
+            for i, reason := range filteredReason {
+                if reason.GetReason() != r.expectReasons[i].GetReason() {
+                    t.Fatalf("expect reasons %v, got reasons %v", r.expectReasons, filteredReason)
+                }
             }
             podKiller.clear()
         })
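
As context for the assertions in the hunk above: the cases pin down a reason-filtering contract for HandleAdmissionFailure. Eviction is attempted only for a critical pod whose admission failed solely on insufficient CPU, memory, or pods, and any reasons the handler does not resolve are returned to the caller. The sketch below is a minimal, self-contained illustration of that filtering step under simplified, assumed types; filterResourceReasons, OtherFailure, and the trimmed InsufficientResourceError are hypothetical stand-ins for the kubelet lifecycle types, not the actual implementation.

package main

import "fmt"

// PredicateFailureReason mirrors the interface the test asserts against
// (GetReason only); simplified stand-in for the kubelet lifecycle type.
type PredicateFailureReason interface {
	GetReason() string
}

// InsufficientResourceError is a trimmed, illustrative version of the
// resource-shortage reason used in the test cases above.
type InsufficientResourceError struct{ ResourceName string }

func (e *InsufficientResourceError) GetReason() string {
	return "insufficient " + e.ResourceName
}

// OtherFailure stands in for any non-resource predicate failure.
type OtherFailure struct{ Desc string }

func (o *OtherFailure) GetReason() string { return o.Desc }

// filterResourceReasons (hypothetical helper) splits failure reasons into
// resource shortages, which preemption might resolve by evicting pods, and
// everything else, which has to be reported back to the caller unchanged.
func filterResourceReasons(reasons []PredicateFailureReason) (resourceReasons, otherReasons []PredicateFailureReason) {
	for _, r := range reasons {
		if _, ok := r.(*InsufficientResourceError); ok {
			resourceReasons = append(resourceReasons, r)
		} else {
			otherReasons = append(otherReasons, r)
		}
	}
	return resourceReasons, otherReasons
}

func main() {
	reasons := []PredicateFailureReason{
		&InsufficientResourceError{ResourceName: "memory"},
		&OtherFailure{Desc: "mock predicate error reason"},
	}
	res, other := filterResourceReasons(reasons)
	// Matches the "with other failure reason" cases above: the resource
	// shortage is a candidate for eviction, the other reason is passed back.
	fmt.Printf("resource reasons: %d, other reasons: %d\n", len(res), len(other))
}
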
@@ -390,6 +440,84 @@ func TestAdmissionRequirementsSubtract(t *testing.T) {
     }
 }
 
+func TestSmallerResourceRequest(t *testing.T) {
+    type testRun struct {
+        testName       string
+        pod1           *v1.Pod
+        pod2           *v1.Pod
+        expectedResult bool
+    }
+
+    podWithNoRequests := getPodWithResources("no-requests", v1.ResourceRequirements{})
+    podWithLowMemory := getPodWithResources("low-memory", v1.ResourceRequirements{
+        Requests: v1.ResourceList{
+            v1.ResourceMemory: resource.MustParse("50Mi"),
+            v1.ResourceCPU:    resource.MustParse("100m"),
+        },
+    })
+    podWithHighMemory := getPodWithResources("high-memory", v1.ResourceRequirements{
+        Requests: v1.ResourceList{
+            v1.ResourceMemory: resource.MustParse("200Mi"),
+            v1.ResourceCPU:    resource.MustParse("100m"),
+        },
+    })
+    podWithHighCPU := getPodWithResources("high-cpu", v1.ResourceRequirements{
+        Requests: v1.ResourceList{
+            v1.ResourceMemory: resource.MustParse("50Mi"),
+            v1.ResourceCPU:    resource.MustParse("200m"),
+        },
+    })
+    runs := []testRun{
+        {
+            testName:       "some requests vs no requests should return false",
+            pod1:           podWithLowMemory,
+            pod2:           podWithNoRequests,
+            expectedResult: false,
+        },
+        {
+            testName:       "lower memory should return true",
+            pod1:           podWithLowMemory,
+            pod2:           podWithHighMemory,
+            expectedResult: true,
+        },
+        {
+            testName:       "memory priority over CPU",
+            pod1:           podWithHighMemory,
+            pod2:           podWithHighCPU,
+            expectedResult: false,
+        },
+        {
+            testName:       "equal resource request should return true",
+            pod1:           podWithLowMemory,
+            pod2:           podWithLowMemory,
+            expectedResult: true,
+        },
+        {
+            testName: "resource type other than CPU and memory are ignored",
+            pod1: getPodWithResources("high-storage", v1.ResourceRequirements{
+                Requests: v1.ResourceList{
+                    v1.ResourceStorage: resource.MustParse("300Mi"),
+                },
+            }),
+            pod2: getPodWithResources("low-storage", v1.ResourceRequirements{
+                Requests: v1.ResourceList{
+                    v1.ResourceStorage: resource.MustParse("200Mi"),
+                },
+            }),
+            expectedResult: true,
+        },
+    }
+    for _, run := range runs {
+        t.Run(run.testName, func(t *testing.T) {
+            result := smallerResourceRequest(run.pod1, run.pod2)
+            if result != run.expectedResult {
+                t.Fatalf("smallerResourceRequest(%s, %s) = %v, expected %v",
+                    run.pod1.Name, run.pod2.Name, result, run.expectedResult)
+            }
+        })
+    }
+}
+
 func getTestPods() map[string]*v1.Pod {
     allPods := map[string]*v1.Pod{
         tinyBurstable: getPodWithResources(tinyBurstable, v1.ResourceRequirements{
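
The TestSmallerResourceRequest cases added in the hunk above encode the ordering the eviction logic relies on when ranking candidate pods: memory requests are compared first, then CPU, other resource types are ignored, and equal requests count as smaller-or-equal (true). Below is a self-contained sketch of that ordering; totalRequest and podWith are simplified, assumed helpers rather than the kubelet's own, so treat it as an illustration of the behavior the test expects, not the actual smallerResourceRequest implementation.

package main

import (
	"fmt"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
)

// totalRequest sums a named resource request across a pod's containers.
// Simplified lookup used only for this illustration.
func totalRequest(pod *v1.Pod, name v1.ResourceName) int64 {
	var total int64
	for _, c := range pod.Spec.Containers {
		if q, ok := c.Resources.Requests[name]; ok {
			total += q.MilliValue()
		}
	}
	return total
}

// smallerOrEqualRequest mirrors the behavior the test asserts: compare
// memory first, then CPU; ignore other resources; ties return true.
func smallerOrEqualRequest(pod1, pod2 *v1.Pod) bool {
	for _, name := range []v1.ResourceName{v1.ResourceMemory, v1.ResourceCPU} {
		r1, r2 := totalRequest(pod1, name), totalRequest(pod2, name)
		if r1 < r2 {
			return true
		}
		if r1 > r2 {
			return false
		}
	}
	return true
}

// podWith builds a single-container pod with the given requests.
func podWith(name, mem, cpu string) *v1.Pod {
	return &v1.Pod{Spec: v1.PodSpec{Containers: []v1.Container{{
		Name: name,
		Resources: v1.ResourceRequirements{Requests: v1.ResourceList{
			v1.ResourceMemory: resource.MustParse(mem),
			v1.ResourceCPU:    resource.MustParse(cpu),
		}},
	}}}}
}

func main() {
	lowMem := podWith("low-memory", "50Mi", "100m")
	highMem := podWith("high-memory", "200Mi", "100m")
	highCPU := podWith("high-cpu", "50Mi", "200m")
	fmt.Println(smallerOrEqualRequest(lowMem, highMem))  // true: lower memory wins
	fmt.Println(smallerOrEqualRequest(highMem, highCPU)) // false: memory outranks CPU
}
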
@@ -507,6 +635,43 @@ func getAdmissionRequirementList(cpu, memory, pods int) admissionRequirementList
     return admissionRequirementList(reqs)
 }
 
+func getPredicateFailureReasons(insufficientCPU, insufficientMemory, insufficientPods int, otherReasonExist bool) (reasonByPredicate []lifecycle.PredicateFailureReason) {
+    if insufficientCPU > 0 {
+        parsedN := parseCPUToInt64(fmt.Sprintf("%dm", insufficientCPU))
+        reasonByPredicate = append(reasonByPredicate, &lifecycle.InsufficientResourceError{
+            ResourceName: v1.ResourceCPU,
+            Requested:    parsedN,
+            Capacity:     parsedN * 5 / 4,
+            Used:         parsedN * 5 / 4,
+        })
+    }
+    if insufficientMemory > 0 {
+        parsedN := parseNonCPUResourceToInt64(fmt.Sprintf("%dMi", insufficientMemory))
+        reasonByPredicate = append(reasonByPredicate, &lifecycle.InsufficientResourceError{
+            ResourceName: v1.ResourceMemory,
+            Requested:    parsedN,
+            Capacity:     parsedN * 5 / 4,
+            Used:         parsedN * 5 / 4,
+        })
+    }
+    if insufficientPods > 0 {
+        parsedN := int64(insufficientPods)
+        reasonByPredicate = append(reasonByPredicate, &lifecycle.InsufficientResourceError{
+            ResourceName: v1.ResourcePods,
+            Requested:    parsedN,
+            Capacity:     parsedN + 1,
+            Used:         parsedN + 1,
+        })
+    }
+    if otherReasonExist {
+        reasonByPredicate = append(reasonByPredicate, &lifecycle.PredicateFailureError{
+            PredicateName: "mock predicate error name",
+            PredicateDesc: "mock predicate error reason",
+        })
+    }
+    return
+}
+
 // this checks if the lists contents contain all of the same elements.
 // this is not correct if there are duplicate pods in the list.
 // for example: podListEqual([a, a, b], [a, b, b]) will return true
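
For reference, the getPredicateFailureReasons helper added above fabricates InsufficientResourceError values whose Capacity and Used are both 5/4 of the requested amount, so the mock node reports zero free capacity for that resource. A small, hedged illustration of the quantities this yields for the getPredicateFailureReasons(0, 550, 0, false) calls in the table-driven cases, computed directly with apimachinery's resource package rather than the test file's parse helpers (variable names are just for illustration):

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/api/resource"
)

func main() {
	// Memory reason built for the 550Mi shortfall used in the test table:
	requested := resource.MustParse("550Mi").Value() // 576716800 bytes
	capacity := requested * 5 / 4                    // 720896000 bytes
	used := capacity                                 // capacity - used == 0, i.e. nothing free
	fmt.Println(requested, capacity, used)
}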
