feat: add NodeResourcesFitPlus and ScarceResourceAvoidance plugin #843

Open: wants to merge 3 commits into master
2 changes: 2 additions & 0 deletions apis/config/register.go
@@ -45,6 +45,8 @@ func addKnownTypes(scheme *runtime.Scheme) error {
&NetworkOverheadArgs{},
&SySchedArgs{},
&PeaksArgs{},
&NodeResourcesFitPlusArgs{},
&ScarceResourceAvoidanceArgs{},
)
return nil
}
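Registering the two new arg types with the scheme is what lets the scheduler decode the corresponding pluginConfig entries from the component configuration into the typed structs added in types.go below.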
21 changes: 21 additions & 0 deletions apis/config/types.go
@@ -298,3 +298,24 @@ type PowerModel struct {
// Power = K0 + K1 * e ^(K2 * x) : where x is utilisation
// Idle power of node will be K0 + K1
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ScarceResourceAvoidanceArgs defines the parameters for ScarceResourceAvoidance plugin.
type ScarceResourceAvoidanceArgs struct {
metav1.TypeMeta
Resources []v1.ResourceName `json:"resources,omitempty"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// NodeResourcesFitPlusArgs defines the parameters for NodeResourcesFitPlus plugin.
type NodeResourcesFitPlusArgs struct {
metav1.TypeMeta
Resources map[v1.ResourceName]ResourcesType `json:"resources"`
}

// ResourcesType holds the scoring strategy and weight applied to a single resource.
type ResourcesType struct {
	Type   schedconfig.ScoringStrategyType `json:"type"`
	Weight int64                           `json:"weight"`
}
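Not part of the diff: a minimal sketch of how these args might be constructed in Go, e.g. from a unit test. The exampleArgs helper and the nvidia.com/gpu resource name are illustrative; MostAllocated and LeastAllocated come from k8s.io/kubernetes/pkg/scheduler/apis/config, matching how the plugin passes v.Type into ScoringStrategy below.

package config_test

import (
	v1 "k8s.io/api/core/v1"
	k8sconfig "k8s.io/kubernetes/pkg/scheduler/apis/config"

	"sigs.k8s.io/scheduler-plugins/apis/config"
)

// exampleArgs is a hypothetical helper showing the shape of both arg structs.
func exampleArgs() (*config.NodeResourcesFitPlusArgs, *config.ScarceResourceAvoidanceArgs) {
	fitArgs := &config.NodeResourcesFitPlusArgs{
		Resources: map[v1.ResourceName]config.ResourcesType{
			v1.ResourceCPU:    {Type: k8sconfig.MostAllocated, Weight: 2},  // bin-pack CPU
			v1.ResourceMemory: {Type: k8sconfig.MostAllocated, Weight: 1},
			"nvidia.com/gpu":  {Type: k8sconfig.LeastAllocated, Weight: 3}, // spread GPU pods
		},
	}
	avoidArgs := &config.ScarceResourceAvoidanceArgs{
		Resources: []v1.ResourceName{"nvidia.com/gpu"},
	}
	return fitArgs, avoidArgs
}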
63 changes: 63 additions & 0 deletions apis/config/zz_generated.deepcopy.go

(generated deepcopy file; contents not rendered)

4 changes: 4 additions & 0 deletions cmd/scheduler/main.go
@@ -18,6 +18,8 @@ package main

import (
	"os"

	noderesourcesfitplus "sigs.k8s.io/scheduler-plugins/pkg/noderesourcefitplus"
	"sigs.k8s.io/scheduler-plugins/pkg/scarceresourceavoidance"

	"k8s.io/component-base/cli"
	_ "k8s.io/component-base/metrics/prometheus/clientgo" // for rest client metric registration
@@ -64,6 +66,8 @@ func main() {
// app.WithPlugin(crossnodepreemption.Name, crossnodepreemption.New),
app.WithPlugin(podstate.Name, podstate.New),
app.WithPlugin(qos.Name, qos.New),
app.WithPlugin(noderesourcesfitplus.Name, noderesourcesfitplus.New),
app.WithPlugin(scarceresourceavoidance.Name, scarceresourceavoidance.New),
)

code := cli.Run(command)
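Note that app.WithPlugin only registers the out-of-tree factories with the scheduler command; each plugin still has to be enabled in a scheduler profile, with its args supplied under pluginConfig, before it takes effect.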
152 changes: 152 additions & 0 deletions pkg/noderesourcefitplus/node_resources_fit_plus.go
@@ -0,0 +1,152 @@
package noderesourcesfitplus

import (
"context"
"fmt"

v1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/kubernetes/pkg/api/v1/resource"
k8sConfig "k8s.io/kubernetes/pkg/scheduler/apis/config"
"k8s.io/kubernetes/pkg/scheduler/framework"
plfeature "k8s.io/kubernetes/pkg/scheduler/framework/plugins/feature"
"k8s.io/kubernetes/pkg/scheduler/framework/plugins/noderesources"
"sigs.k8s.io/scheduler-plugins/apis/config"
)

const (
	// Name is the name of the plugin used in the plugin registry and configurations.
	Name = "NodeResourcesFitPlus"
)

var (
_ framework.ScorePlugin = &Plugin{}
)

// Plugin scores nodes by delegating each requested resource to the upstream
// NodeResourcesFit plugin and combining the per-resource scores by weight.
type Plugin struct {
	handle framework.Handle
	args   *config.NodeResourcesFitPlusArgs
}

// New initializes a new plugin and returns it.
func New(_ context.Context, args runtime.Object, handle framework.Handle) (framework.Plugin, error) {
	nodeResourcesFitPlusArgs, ok := args.(*config.NodeResourcesFitPlusArgs)
	if !ok {
		return nil, fmt.Errorf("want args to be of type NodeResourcesFitPlusArgs, got %T", args)
	}

return &Plugin{
handle: handle,
args: nodeResourcesFitPlusArgs,
}, nil
}

// Name returns the name of the plugin.
func (s *Plugin) Name() string {
	return Name
}

// Score instantiates the upstream NodeResourcesFit plugin once per resource
// the pod requests, using the strategy configured for that resource, and
// returns the weighted average of the per-resource scores.
func (s *Plugin) Score(ctx context.Context, state *framework.CycleState, p *v1.Pod, nodeName string) (int64, *framework.Status) {
nodeInfo, err := s.handle.SnapshotSharedLister().NodeInfos().Get(nodeName)
if err != nil {
return 0, framework.NewStatus(framework.Error, fmt.Sprintf("getting node %q from Snapshot: %v", nodeName, err))
}

var nodeScore int64
var weightSum int64

podRequest, _ := fitsRequest(computePodResourceRequest(p).Resource, nodeInfo)

for _, requestSourceName := range podRequest {
v, ok := s.args.Resources[requestSourceName]
if !ok {
continue
}
fit, err := noderesources.NewFit(ctx,
&k8sConfig.NodeResourcesFitArgs{
ScoringStrategy: &k8sConfig.ScoringStrategy{
Type: v.Type, // MostAllocated or LeastAllocated
Resources: []k8sConfig.ResourceSpec{
{Name: string(requestSourceName), Weight: 1},
},
},
}, s.handle, plfeature.Features{})

if err != nil {
return 0, framework.NewStatus(framework.Error, err.Error())
}
		resourceScore, status := fit.(framework.ScorePlugin).Score(ctx, state, p, nodeName)
		if !status.IsSuccess() {
			// Propagate the scoring failure; err is nil here, so report the status message.
			return 0, framework.NewStatus(framework.Error, status.Message())
		}

nodeScore += resourceScore * v.Weight
weightSum += v.Weight
}

	// None of the resources the pod requests are configured for this plugin,
	// so every node is equally acceptable.
	if weightSum == 0 {
		return framework.MaxNodeScore, framework.NewStatus(framework.Success, "")
	}
	score := nodeScore / weightSum

	return score, nil
}
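// Worked example with illustrative numbers: if cpu is configured as
// {MostAllocated, weight 2} and nvidia.com/gpu as {LeastAllocated, weight 3},
// and the delegated Fit scores for a node come back as 60 and 90 respectively,
// the final node score is (60*2 + 90*3) / (2+3) = 78.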

// ScoreExtensions of the Score plugin.
func (s *Plugin) ScoreExtensions() framework.ScoreExtensions {
	return nil
}

// preFilterState holds the pod's aggregated resource request.
type preFilterState struct {
	framework.Resource
}

// computePodResourceRequest aggregates the pod's effective resource requests
// into a preFilterState.
func computePodResourceRequest(pod *v1.Pod) *preFilterState {
	// The pod hasn't been scheduled yet, so we don't need to worry about
	// InPlacePodVerticalScalingEnabled.
	reqs := resource.PodRequests(pod, resource.PodResourcesOptions{})
	result := &preFilterState{}
	result.SetMaxResource(reqs)
	return result
}

// fitsRequest lists the resource names the pod requests and the resource names
// the node has allocatable; Score only consumes the pod-side list.
func fitsRequest(podRequest framework.Resource, nodeInfo *framework.NodeInfo) ([]v1.ResourceName, []v1.ResourceName) {
	var podRequestResource []v1.ResourceName
	var nodeRequestResource []v1.ResourceName

if podRequest.MilliCPU > 0 {
podRequestResource = append(podRequestResource, v1.ResourceCPU)
}

if nodeInfo.Allocatable.MilliCPU > 0 {
nodeRequestResource = append(nodeRequestResource, v1.ResourceCPU)
}

if podRequest.Memory > 0 {
podRequestResource = append(podRequestResource, v1.ResourceMemory)
}

if nodeInfo.Allocatable.Memory > 0 {
nodeRequestResource = append(nodeRequestResource, v1.ResourceMemory)
}

if podRequest.EphemeralStorage > 0 {
podRequestResource = append(podRequestResource, v1.ResourceEphemeralStorage)
}

if nodeInfo.Allocatable.EphemeralStorage > 0 {
nodeRequestResource = append(nodeRequestResource, v1.ResourceEphemeralStorage)
}

for rName, rQuant := range podRequest.ScalarResources {
if rQuant > 0 {
podRequestResource = append(podRequestResource, rName)
}
}

for rName, rQuant := range nodeInfo.Allocatable.ScalarResources {
if rQuant > 0 {
nodeRequestResource = append(nodeRequestResource, rName)
}
}

return podRequestResource, nodeRequestResource
}