8000 [PodLevelResources] Update Downward API defaulting for resource limits · kubernetes/kubernetes@bedd1ef · GitHub
[go: up one dir, main page]

Skip to content

Commit bedd1ef

Browse files
committed
[PodLevelResources] Update Downward API defaulting for resource limits
Previously, when container-level resource limits were not specified and the Downward API was used to set environment variables referencing them, the node's allocatable resources were used as the fallback. With the introduction of the Pod Level Resources feature, this behavior is updated: if container-level resource limits are not specified, the Downward API now uses the pod-level resource limits instead. If neither container-level nor pod-level resource limits are specified, the behavior remains unchanged: it falls back to the node's allocatable resources. Signed-off-by: Tsubasa Nagasawa <toversus2357@gmail.com>
1 parent b2f27c0 commit bedd1ef

File tree

5 files changed

+300
-11
lines changed

5 files changed

+300
-11
lines changed

pkg/kubelet/kubelet_resources.go

Lines changed: 26 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -21,18 +21,21 @@ import (
2121

2222
"k8s.io/klog/v2"
2323

24-
"k8s.io/api/core/v1"
24+
corev1 "k8s.io/api/core/v1"
25+
utilfeature "k8s.io/apiserver/pkg/util/feature"
26+
resourcehelper "k8s.io/component-helpers/resource"
2527
"k8s.io/kubernetes/pkg/api/v1/resource"
28+
kubefeatures "k8s.io/kubernetes/pkg/features"
2629
)
2730

2831
// defaultPodLimitsForDownwardAPI copies the input pod, and optional container,
2932
// and applies default resource limits. it returns a copy of the input pod,
3033
// and a copy of the input container (if specified) with default limits
31-
// applied. if a container has no limit specified, it will default the limit to
32-
// the node allocatable.
33-
// TODO: if/when we have pod level resources, we need to update this function
34-
// to use those limits instead of node allocatable.
35-
func (kl *Kubelet) defaultPodLimitsForDownwardAPI(pod *v1.Pod, container *v1.Container) (*v1.Pod, *v1.Container, error) {
34+
// applied.
35+
// If a container has no limits specified, it defaults to the pod-level resources.
36+
// If neither container-level nor pod-level resources are specified, it defaults
37+
// to the node's allocatable resources.
38+
func (kl *Kubelet) defaultPodLimitsForDownwardAPI(pod *corev1.Pod, container *corev1.Container) (*corev1.Pod, *corev1.Container, error) {
3639
if pod == nil {
3740
return nil, nil, fmt.Errorf("invalid input, pod cannot be nil")
3841
}
@@ -42,13 +45,29 @@ func (kl *Kubelet) defaultPodLimitsForDownwardAPI(pod *v1.Pod, container *v1.Con
4245
return nil, nil, fmt.Errorf("failed to find node object, expected a node")
4346
}
4447
allocatable := node.Status.Allocatable
48+
if utilfeature.DefaultFeatureGate.Enabled(kubefeatures.PodLevelResources) && resourcehelper.IsPodLevelLimitsSet(pod) {
49+
allocatable = allocatable.DeepCopy()
50+
// Resources supported by the Downward API
51+
for _, resource := range []corev1.ResourceName{corev1.ResourceCPU, corev1.ResourceMemory, corev1.ResourceEphemeralStorage} {
52+
// Skip resources not supported by Pod Level Resources
53+
if !resourcehelper.IsSupportedPodLevelResource(resource) {
54+
continue
55+
}
56+
if val, exists := pod.Spec.Resources.Limits[resource]; exists && !val.IsZero() {
57+
if _, exists := allocatable[resource]; exists {
58+
allocatable[resource] = val.DeepCopy()
59+
}
60+
}
61+
}
62+
}
63+
4564
klog.InfoS("Allocatable", "allocatable", allocatable)
4665
outputPod := pod.DeepCopy()
4766
for idx := range outputPod.Spec.Containers {
4867
resource.MergeContainerResourceLimits(&outputPod.Spec.Containers[idx], allocatable)
4968
}
5069

51-
var outputContainer *v1.Container
70+
var outputContainer *corev1.Container
5271
if container != nil {
5372
outputContainer = container.DeepCopy()
5473
resource.MergeContainerResourceLimits(outputContainer, allocatable)

pkg/kubelet/kubelet_resources_test.go

Lines changed: 48 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -25,6 +25,9 @@ import (
2525
apiequality "k8s.io/apimachinery/pkg/api/equality"
2626
"k8s.io/apimachinery/pkg/api/resource"
2727
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
28+
utilfeature "k8s.io/apiserver/pkg/util/feature"
29+
featuregatetesting "k8s.io/component-base/featuregate/testing"
30+
kubefeatures "k8s.io/kubernetes/pkg/features"
2831
)
2932

3033
func TestPodResourceLimitsDefaulting(t *testing.T) {
@@ -46,8 +49,9 @@ func TestPodResourceLimitsDefaulting(t *testing.T) {
4649
},
4750
}
4851
cases := []struct {
49-
pod *v1.Pod
50-
expected *v1.Pod
52+
pod *v1.Pod
53+
expected *v1.Pod
54+
podLevelResourcesEnabled bool
5155
}{
5256
{
5357
pod: getPod("0", "0"),
@@ -65,9 +69,35 @@ func TestPodResourceLimitsDefaulting(t *testing.T) {
6569
pod: getPod("0", "1Mi"),
6670
expected: getPod("6", "1Mi"),
6771
},
72+
{
73+
pod: getPodWithPodLevelResources("0", "1Mi", "0", "0"),
74+
expected: getPodWithPodLevelResources("0", "1Mi", "6", "1Mi"),
75+
podLevelResourcesEnabled: true,
76+
},
77+
{
78+
pod: getPodWithPodLevelResources("1", "0", "0", "0"),
79+
expected: getPodWithPodLevelResources("1", "0", "1", "4Gi"),
80+
podLevelResourcesEnabled: true,
81+
},
82+
{
83+
pod: getPodWithPodLevelResources("1", "1Mi", "", ""),
84+
expected: getPodWithPodLevelResources("1", "1Mi", "1", "1Mi"),
85+
podLevelResourcesEnabled: true,
86+
},
87+
{
88+
pod: getPodWithPodLevelResources("1", "5Mi", "0", "1Mi"),
89+
expected: getPodWithPodLevelResources("1", "5Mi", "1", "1Mi"),
90+
podLevelResourcesEnabled: true,
91+
},
92+
{
93+
pod: getPodWithPodLevelResources("1", "5Mi", "1", "1Mi"),
94+
expected: getPodWithPodLevelResources("1", "5Mi", "1", "1Mi"),
95+
podLevelResourcesEnabled: true,
96+
},
6897
}
6998
as := assert.New(t)
7099
for idx, tc := range cases {
100+
featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, kubefeatures.PodLevelResources, tc.podLevelResourcesEnabled)
71101
actual, _, err := tk.kubelet.defaultPodLimitsForDownwardAPI(tc.pod, nil)
72102
as.NoError(err, "failed to default pod limits: %v", err)
73103
if !apiequality.Semantic.DeepEqual(tc.expected, actual) {
@@ -98,3 +128,19 @@ func getPod(cpuLimit, memoryLimit string) *v1.Pod {
98128
},
99129
}
100130
}
131+
132+
func getPodWithPodLevelResources(plCPULimit, plMemoryLimit, clCPULimit, clMemoryLimit string) *v1.Pod {
133+
pod := getPod(clCPULimit, clMemoryLimit)
134+
resources := v1.ResourceRequirements{}
135+
if plCPULimit != "" || plMemoryLimit != "" {
136+
resources.Limits = make(v1.ResourceList)
137+
}
138+
if plCPULimit != "" {
139+
resources.Limits[v1.ResourceCPU] = resource.MustParse(plCPULimit)
140+
}
141+
if plMemoryLimit != "" {
142+
resources.Limits[v1.ResourceMemory] = resource.MustParse(plMemoryLimit)
143+
}
144+
pod.Spec.Resources = &resources
145+
return pod
146+
}

staging/src/k8s.io/component-helpers/resource/helpers.go

Lines changed: 20 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -114,6 +114,26 @@ func IsPodLevelRequestsSet(pod *v1.Pod) bool {
114114
return false
115115
}
116116

117+
// IsPodLevelLimitsSet checks if pod-level limits are set. It returns true if
118+
// Limits map is non-empty and contains at least one supported pod-level resource.
119+
func IsPodLevelLimitsSet(pod *v1.Pod) bool {
120+
if pod.Spec.Resources == nil {
121+
return false
122+
}
123+
124+
if len(pod.Spec.Resources.Limits) == 0 {
125+
return false
126+
}
127+
128+
for resourceName := range pod.Spec.Resources.Limits {
129+
if IsSupportedPodLevelResource(resourceName) {
130+
return true
131+
}
132+
}
133+
134+
return false
135+
}
136+
117137
// PodRequests computes the total pod requests per the PodResourcesOptions supplied.
118138
// If PodResourcesOptions is nil, then the requests are returned including pod overhead.
119139
// If the PodLevelResources feature is enabled AND the pod-level resources are set,

staging/src/k8s.io/component-helpers/resource/helpers_test.go

Lines changed: 52 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1554,6 +1554,58 @@ func TestIsPodLevelResourcesSet(t *testing.T) {
15541554

15551555
}
15561556

1557+
func TestIsPodLevelLimitsSet(t *testing.T) {
1558+
testCases := []struct {
1559+
name string
1560+
podResources *v1.ResourceRequirements
1561+
expected bool
1562+
}{
1563+
{
1564+
name: "nil resources struct",
1565+
expected: false,
1566+
},
1567+
{
1568+
name: "empty resources struct",
1569+
podResources: &v1.ResourceRequirements{},
1570+
expected: false,
1571+
},
1572+
{
1573+
name: "only resource requests set",
1574+
podResources: &v1.ResourceRequirements{
1575+
Requests: v1.ResourceList{v1.ResourceMemory: resource.MustParse("100Mi")},
1576+
},
1577+
expected: false,
1578+
},
1579+
{
1580+
name: "only unsupported resource limits set",
1581+
podResources: &v1.ResourceRequirements{
1582+
Limits: v1.ResourceList{v1.ResourceEphemeralStorage: resource.MustParse("1Mi")},
1583+
},
1584+
expected: false,
1585+
},
1586+
{
1587+
name: "unsupported and suported resources limits set",
1588+
podResources: &v1.ResourceRequirements{
1589+
Limits: v1.ResourceList{
1590+
v1.ResourceEphemeralStorage: resource.MustParse("1Mi"),
1591+
v1.ResourceCPU: resource.MustParse("1m"),
1592+
},
1593+
},
1594+
expected: true,
1595+
},
1596+
}
1597+
1598+
for _, tc := range testCases {
1599+
t.Run(tc.name, func(t *testing.T) {
1600+
testPod := &v1.Pod{Spec: v1.PodSpec{Resources: tc.podResources}}
1601+
if got := IsPodLevelLimitsSet(testPod); got != tc.expected {
1602+
t.Errorf("got=%t, want=%t", got, tc.expected)
1603+
}
1604+
})
1605+
}
1606+
1607+
}
1608+
15571609
func TestPodLevelResourceRequests(t *testing.T) {
15581610
restartAlways := v1.ContainerRestartPolicyAlways
15591611
testCases := []struct {

0 commit comments

Comments
 (0)
0