test: code coverage increase for kubelet/preemption by ylink-lfs · Pull Request #132607 · kubernetes/kubernetes · GitHub

test: code coverage increase for kubelet/preemption #132607

Open

wants to merge 1 commit into base: master

Changes from all commits

229 changes: 197 additions & 32 deletions pkg/kubelet/preemption/preemption_test.go
@@ -26,6 +26,7 @@ import (
"k8s.io/client-go/tools/record"
kubeapi "k8s.io/kubernetes/pkg/apis/core"
"k8s.io/kubernetes/pkg/apis/scheduling"
"k8s.io/kubernetes/pkg/kubelet/lifecycle"
)

const (
@@ -90,70 +91,119 @@ func getTestCriticalPodAdmissionHandler(podProvider *fakePodProvider, podKiller
}
}

func TestEvictPodsToFreeRequests(t *testing.T) {
func TestHandleAdmissionFailure(t *testing.T) {
type testRun struct {
testName string
isPodKillerWithError bool
inputPods []*v1.Pod
insufficientResources admissionRequirementList
expectErr bool
expectedOutput []*v1.Pod
testName string
isPodKillerWithError bool
inputPods []*v1.Pod
admitPodType string
failReasons []lifecycle.PredicateFailureReason
expectErr bool
expectedOutput []*v1.Pod
expectReasons []lifecycle.PredicateFailureReason
}
allPods := getTestPods()
runs := []testRun{
{
testName: "critical pods cannot be preempted",
isPodKillerWithError: false,
inputPods: []*v1.Pod{allPods[clusterCritical]},
insufficientResources: getAdmissionRequirementList(0, 0, 1),
expectErr: true,
expectedOutput: nil,
testName: "critical pods cannot be preempted - no other failure reason",
isPodKillerWithError: false,
inputPods: []*v1.Pod{allPods[clusterCritical]},
admitPodType: clusterCritical,
failReasons: getPredicateFailureReasons(0, 0, 1, false),
expectErr: true,
expectedOutput: nil,
expectReasons: getPredicateFailureReasons(0, 0, 0, false),
},
{
testName: "best effort pods are not preempted when attempting to free resources",
isPodKillerWithError: false,
inputPods: []*v1.Pod{allPods[bestEffort]},
insufficientResources: getAdmissionRequirementList(0, 1, 0),
expectErr: true,
expectedOutput: nil,
testName: "non-critical pod should not trigger eviction - no other failure reason",
isPodKillerWithError: false,
inputPods: []*v1.Pod{allPods[burstable]},
admitPodType: guaranteed,
failReasons: getPredicateFailureReasons(0, 1, 0, false),
expectErr: false,
expectedOutput: nil,
expectReasons: getPredicateFailureReasons(0, 1, 0, false),
},
{
testName: "multiple pods evicted",
testName: "best effort pods are not preempted when attempting to free resources - no other failure reason",
isPodKillerWithError: false,
inputPods: []*v1.Pod{allPods[bestEffort]},
admitPodType: clusterCritical,
failReasons: getPredicateFailureReasons(0, 1, 0, false),
expectErr: true,
expectedOutput: nil,
expectReasons: getPredicateFailureReasons(0, 0, 0, false),
},
{
testName: "multiple pods evicted - no other failure reason",
isPodKillerWithError: false,
inputPods: []*v1.Pod{
allPods[clusterCritical], allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable],
allPods[guaranteed], allPods[highRequestGuaranteed]},
insufficientResources: getAdmissionRequirementList(0, 550, 0),
expectErr: false,
expectedOutput: []*v1.Pod{allPods[highRequestBurstable], allPods[highRequestGuaranteed]},
admitPodType: clusterCritical,
failReasons: getPredicateFailureReasons(0, 550, 0, false),
expectErr: false,
expectedOutput: []*v1.Pod{allPods[highRequestBurstable], allPods[highRequestGuaranteed]},
expectReasons: getPredicateFailureReasons(0, 0, 0, false),
},
{
testName: "multiple pods with eviction error",
testName: "multiple pods with eviction error - no other failure reason",
isPodKillerWithError: true,
inputPods: []*v1.Pod{
allPods[clusterCritical], allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable],
allPods[guaranteed], allPods[highRequestGuaranteed]},
insufficientResources: getAdmissionRequirementList(0, 550, 0),
expectErr: false,
expectedOutput: nil,
admitPodType: clusterCritical,
failReasons: getPredicateFailureReasons(0, 550, 0, false),
expectErr: false,
expectedOutput: nil,
expectReasons: getPredicateFailureReasons(0, 0, 0, false),
},
{
testName: "non-critical pod should not trigger eviction - with other failure reason",
isPodKillerWithError: false,
inputPods: []*v1.Pod{allPods[burstable]},
admitPodType: guaranteed,
failReasons: getPredicateFailureReasons(0, 1, 0, true),
expectErr: false,
expectedOutput: nil,
expectReasons: getPredicateFailureReasons(0, 1, 0, true),
},
{
testName: "critical pods cannot be preempted - with other failure reason",
isPodKillerWithError: false,
inputPods: []*v1.Pod{allPods[clusterCritical]},
admitPodType: clusterCritical,
failReasons: getPredicateFailureReasons(0, 0, 1, true),
expectErr: false,
expectedOutput: nil,
expectReasons: getPredicateFailureReasons(0, 0, 0, true),
},
}
for _, r := range runs {
t.Run(r.testName, func(t *testing.T) {
podProvider := newFakePodProvider()
podKiller := newFakePodKiller(r.isPodKillerWithError)
defer podKiller.clear()
criticalPodAdmissionHandler := getTestCriticalPodAdmissionHandler(podProvider, podKiller)
podProvider.setPods(r.inputPods)
outErr := criticalPodAdmissionHandler.evictPodsToFreeRequests(allPods[clusterCritical], r.insufficientResources)
admitPodRef := allPods[r.admitPodType]
filteredReason, outErr := criticalPodAdmissionHandler.HandleAdmissionFailure(admitPodRef, r.failReasons)
outputPods := podKiller.getKilledPods()
if !r.expectErr && outErr != nil {
t.Errorf("evictPodsToFreeRequests returned an unexpected error during the %s test. Err: %v", r.testName, outErr)
t.Errorf("HandleAdmissionFailure returned an unexpected error during the %s test. Err: %v", r.testName, outErr)
} else if r.expectErr && outErr == nil {
t.Errorf("evictPodsToFreeRequests expected an error but returned a successful output=%v during the %s test.", outputPods, r.testName)
t.Errorf("HandleAdmissionFailure expected an error but returned a successful output=%v during the %s test.", outputPods, r.testName)
} else if !podListEqual(r.expectedOutput, outputPods) {
t.Errorf("evictPodsToFreeRequests expected %v but got %v during the %s test.", r.expectedOutput, outputPods, r.testName)
t.Errorf("HandleAdmissionFailure expected %v but got %v during the %s test.", r.expectedOutput, outputPods, r.testName)
}
if len(filteredReason) != len(r.expectReasons) {
t.Fatalf("expect reasons %b, got reasons %v", r.expectReasons, filteredReason)
}
for i, reason := range filteredReason {
if reason.GetReason() != r.expectReasons[i].GetReason() {
t.Fatalf("expect reasons %b, got reasons %v", r.expectReasons, filteredReason)
}
}
podKiller.clear()
})
}
}
@@ -390,6 +440,84 @@ func TestAdmissionRequirementsSubtract(t *testing.T) {
}
}

func TestSmallerResourceRequest(t *testing.T) {
type testRun struct {
testName string
pod1 *v1.Pod
pod2 *v1.Pod
expectedResult bool
}

podWithNoRequests := getPodWithResources("no-requests", v1.ResourceRequirements{})
podWithLowMemory := getPodWithResources("low-memory", v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourceCPU: resource.MustParse("100m"),
},
})
podWithHighMemory := getPodWithResources("high-memory", v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("200Mi"),
v1.ResourceCPU: resource.MustParse("100m"),
},
})
podWithHighCPU := getPodWithResources("high-cpu", v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceMemory: resource.MustParse("50Mi"),
v1.ResourceCPU: resource.MustParse("200m"),
},
})
runs := []testRun{
{
testName: "some requests vs no requests should return false",
pod1: podWithLowMemory,
pod2: podWithNoRequests,
expectedResult: false,
},
{
testName: "lower memory should return true",
pod1: podWithLowMemory,
pod2: podWithHighMemory,
expectedResult: true,
},
{
testName: "memory priority over CPU",
pod1: podWithHighMemory,
pod2: podWithHighCPU,
expectedResult: false,
},
{
testName: "equal resource request should return true",
pod1: podWithLowMemory,
pod2: podWithLowMemory,
expectedResult: true,
},
{
testName: "resource type other than CPU and memory are ignored",
pod1: getPodWithResources("high-storage", v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("300Mi"),
},
}),
pod2: getPodWithResources("low-storage", v1.ResourceRequirements{
Requests: v1.ResourceList{
v1.ResourceStorage: resource.MustParse("200Mi"),
},
}),
expectedResult: true,
},
}
for _, run := range runs {
t.Run(run.testName, func(t *testing.T) {
result := smallerResourceRequest(run.pod1, run.pod2)
if result != run.expectedResult {
t.Fatalf("smallerResourceRequest(%s, %s) = %v, expected %v",
run.pod1.Name, run.pod2.Name, result, run.expectedResult)
}
})
}
}

func getTestPods() map[string]*v1.Pod {
allPods := map[string]*v1.Pod{
tinyBurstable: getPodWithResources(tinyBurstable, v1.ResourceRequirements{
@@ -507,6 +635,43 @@ func getAdmissionRequirementList(cpu, memory, pods int) admissionRequirementList
return admissionRequirementList(reqs)
}

func getPredicateFailureReasons(insufficientCPU, insufficientMemory, insufficientPods int, otherReasonExist bool) (reasonByPredicate []lifecycle.PredicateFailureReason) {
if insufficientCPU > 0 {
parsedN := parseCPUToInt64(fmt.Sprintf("%dm", insufficientCPU))
reasonByPredicate = append(reasonByPredicate, &lifecycle.InsufficientResourceError{
ResourceName: v1.ResourceCPU,
Requested: parsedN,
Capacity: parsedN * 5 / 4,
Used: parsedN * 5 / 4,
})
}
if insufficientMemory > 0 {
parsedN := parseNonCPUResourceToInt64(fmt.Sprintf("%dMi", insufficientMemory))
reasonByPredicate = append(reasonByPredicate, &lifecycle.InsufficientResourceError{
ResourceName: v1.ResourceMemory,
Requested: parsedN,
Capacity: parsedN * 5 / 4,
Used: parsedN * 5 / 4,
})
}
if insufficientPods > 0 {
parsedN := int64(insufficientPods)
reasonByPredicate = append(reasonByPredicate, &lifecycle.InsufficientResourceError{
ResourceName: v1.ResourcePods,
Requested: parsedN,
Capacity: parsedN + 1,
Used: parsedN + 1,
})
}
if otherReasonExist {
reasonByPredicate = append(reasonByPredicate, &lifecycle.PredicateFailureError{
PredicateName: "mock predicate error name",
PredicateDesc: "mock predicate error reason",
})
}
return
}

// this checks if the lists contents contain all of the same elements.
// this is not correct if there are duplicate pods in the list.
// for example: podListEqual([a, a, b], [a, b, b]) will return true