Implement unit tests for batch processing of check capacity class
Duke0404 committed Sep 13, 2024
1 parent 0317ccf commit 5dddce4
Showing 3 changed files with 428 additions and 19 deletions.
11 changes: 11 additions & 0 deletions cluster-autoscaler/processors/provreq/testutils.go
@@ -0,0 +1,11 @@
package provreq

import (
    "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqclient"
    "k8s.io/utils/clock/testing"
)

// NewFakePodsInjector creates a new instance of ProvisioningRequestPodsInjector with the given client and clock for testing.
func NewFakePodsInjector(client *provreqclient.ProvisioningRequestClient, clock *testing.FakePassiveClock) *ProvisioningRequestPodsInjector {
    return &ProvisioningRequestPodsInjector{client: client, clock: clock}
}
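
As a usage sketch, not part of this commit: a test in this package could wire the fake injector together as below. The fake client constructor NewFakeProvisioningRequestClient is assumed from the provreqclient test helpers; the fixed clock comes from k8s.io/utils/clock/testing.

package provreq

import (
    "context"
    "testing"
    "time"

    "k8s.io/autoscaler/cluster-autoscaler/provisioningrequest/provreqclient"
    clocktesting "k8s.io/utils/clock/testing"
)

func TestFakePodsInjectorWiring(t *testing.T) {
    // Fake client seeded with no ProvisioningRequests; the constructor name
    // is an assumption based on the provreqclient test helpers.
    client := provreqclient.NewFakeProvisioningRequestClient(context.Background(), t)
    // Pin the clock so any time-based behavior in the injector is deterministic.
    clock := clocktesting.NewFakePassiveClock(time.Now())
    injector := NewFakePodsInjector(client, clock)
    _ = injector // real tests would exercise the injector from here
}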
215 changes: 215 additions & 0 deletions cluster-autoscaler/processors/status/scale_up_status_processor.go
@@ -17,6 +17,10 @@ limitations under the License.
package status

import (
    "fmt"
    "sort"
    "strings"

    apiv1 "k8s.io/api/core/v1"
    "k8s.io/autoscaler/cluster-autoscaler/utils/errors"

@@ -143,3 +147,214 @@ func UpdateScaleUpError(s *ScaleUpStatus, err errors.AutoscalerError) (*ScaleUpStatus, errors.AutoscalerError) {
    s.Result = ScaleUpError
    return s, err
}

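// combinedStatusSet accumulates the union of several ScaleUpStatus values so
// that they can later be exported as a single status. The pointer-keyed sets
// deduplicate by pointer identity, not by value.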
type combinedStatusSet struct {
    Result                      ScaleUpResult
    ScaleupErrors               map[*errors.AutoscalerError]bool
    ScaleUpInfosSet             map[nodegroupset.ScaleUpInfo]bool
    PodsTriggeredScaleUpSet     map[*apiv1.Pod]bool
    PodsRemainUnschedulableSet  map[*NoScaleUpInfo]bool
    PodsAwaitEvaluationSet      map[*apiv1.Pod]bool
    CreateNodeGroupResultsSet   map[*nodegroups.CreateNodeGroupResult]bool
    ConsideredNodeGroupsSet     map[cloudprovider.NodeGroup]bool
    FailedCreationNodeGroupsSet map[cloudprovider.NodeGroup]bool
    FailedResizeNodeGroupsSet   map[cloudprovider.NodeGroup]bool
}

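// Add merges status into the set, keeping the highest-priority (lowest-valued)
// Result and inserting any errors, scale-up infos, pods and node groups that
// are not already present.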
func (c *combinedStatusSet) Add(status *ScaleUpStatus) {
    // This relies on the ScaleUpResult enum being ordered by priority: the
    // lower the value, the higher its precedence, so combining two results
    // means taking the minimum. If new results are added, either keep the
    // enum ordered this way, or combine the results differently.
    if c.Result > status.Result {
        c.Result = status.Result
    }
    if status.ScaleUpError != nil {
        if _, found := c.ScaleupErrors[status.ScaleUpError]; !found {
            c.ScaleupErrors[status.ScaleUpError] = true
        }
    }
    if status.ScaleUpInfos != nil {
        for _, scaleUpInfo := range status.ScaleUpInfos {
            if _, found := c.ScaleUpInfosSet[scaleUpInfo]; !found {
                c.ScaleUpInfosSet[scaleUpInfo] = true
            }
        }
    }
    if status.PodsTriggeredScaleUp != nil {
        for _, pod := range status.PodsTriggeredScaleUp {
            if _, found := c.PodsTriggeredScaleUpSet[pod]; !found {
                c.PodsTriggeredScaleUpSet[pod] = true
            }
        }
    }
    if status.PodsRemainUnschedulable != nil {
        for _, pod := range status.PodsRemainUnschedulable {
            // Note: &pod is the address of the per-iteration copy, so this
            // set deduplicates by pointer identity of those copies (likewise
            // for CreateNodeGroupResults below).
            if _, found := c.PodsRemainUnschedulableSet[&pod]; !found {
                c.PodsRemainUnschedulableSet[&pod] = true
            }
        }
    }
    if status.PodsAwaitEvaluation != nil {
        for _, pod := range status.PodsAwaitEvaluation {
            if _, found := c.PodsAwaitEvaluationSet[pod]; !found {
                c.PodsAwaitEvaluationSet[pod] = true
            }
        }
    }
    if status.CreateNodeGroupResults != nil {
        for _, createNodeGroupResult := range status.CreateNodeGroupResults {
            if _, found := c.CreateNodeGroupResultsSet[&createNodeGroupResult]; !found {
                c.CreateNodeGroupResultsSet[&createNodeGroupResult] = true
            }
        }
    }
    if status.ConsideredNodeGroups != nil {
        for _, nodeGroup := range status.ConsideredNodeGroups {
            if _, found := c.ConsideredNodeGroupsSet[nodeGroup]; !found {
                c.ConsideredNodeGroupsSet[nodeGroup] = true
            }
        }
    }
    if status.FailedCreationNodeGroups != nil {
        for _, nodeGroup := range status.FailedCreationNodeGroups {
            if _, found := c.FailedCreationNodeGroupsSet[nodeGroup]; !found {
                c.FailedCreationNodeGroupsSet[nodeGroup] = true
            }
        }
    }
    if status.FailedResizeNodeGroups != nil {
        for _, nodeGroup := range status.FailedResizeNodeGroups {
            if _, found := c.FailedResizeNodeGroupsSet[nodeGroup]; !found {
                c.FailedResizeNodeGroupsSet[nodeGroup] = true
            }
        }
    }
}

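// formatMessageFromBatchErrors renders errs as the first error's message
// followed by a bracketed, quoted list of the remaining unique errors, each
// prefixed with its type when printErrorTypes is set.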
func (c *combinedStatusSet) formatMessageFromBatchErrors(errs []errors.AutoscalerError, printErrorTypes bool) string {
    firstErr := errs[0]
    var builder strings.Builder
    builder.WriteString(firstErr.Error())
    builder.WriteString(" ...and other concurrent errors: [")
    formattedErrs := map[errors.AutoscalerError]bool{
        firstErr: true,
    }
    for _, err := range errs {
        if _, has := formattedErrs[err]; has {
            continue
        }
        formattedErrs[err] = true
        var message string
        if printErrorTypes {
            message = fmt.Sprintf("[%s] %s", err.Type(), err.Error())
        } else {
            message = err.Error()
        }
        // formattedErrs already holds firstErr plus the current error, so a
        // separator is only needed from the second listed error onwards.
        if len(formattedErrs) > 2 {
            builder.WriteString(", ")
        }
        builder.WriteString(fmt.Sprintf("%q", message))
    }
    builder.WriteString("]")
    return builder.String()
}

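// combineBatchScaleUpErrors flattens the accumulated scale-up errors into one:
// nil when there are none, the error itself when there is exactly one (or
// several sharing a type and message), and otherwise a combined error that
// keeps the first error's type and lists the remaining errors in its message.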
func (c *combinedStatusSet) combineBatchScaleUpErrors() *errors.AutoscalerError {
    if len(c.ScaleupErrors) == 0 {
        return nil
    }
    if len(c.ScaleupErrors) == 1 {
        for err := range c.ScaleupErrors {
            return err
        }
    }
    uniqueMessages := make(map[string]bool)
    uniqueTypes := make(map[errors.AutoscalerErrorType]bool)
    for err := range c.ScaleupErrors {
        uniqueTypes[(*err).Type()] = true
        uniqueMessages[(*err).Error()] = true
    }
    if len(uniqueTypes) == 1 && len(uniqueMessages) == 1 {
        for err := range c.ScaleupErrors {
            return err
        }
    }
    // Sort the errors to stabilize the combined message and make log
    // aggregation easier.
    errs := make([]errors.AutoscalerError, 0, len(c.ScaleupErrors))
    for err := range c.ScaleupErrors {
        errs = append(errs, *err)
    }
    sort.Slice(errs, func(i, j int) bool {
        errA := errs[i]
        errB := errs[j]
        if errA.Type() == errB.Type() {
            return errs[i].Error() < errs[j].Error()
        }
        return errA.Type() < errB.Type()
    })
    firstErr := errs[0]
    printErrorTypes := len(uniqueTypes) > 1
    message := c.formatMessageFromBatchErrors(errs, printErrorTypes)
    combinedErr := errors.NewAutoscalerError(firstErr.Type(), message)
    return &combinedErr
}
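
To illustrate the message format, suppose the sorted slice holds three hypothetical errors: a cloudProviderError "out of resources" and two internalErrors, "cannot create node group" and "timeout". Two types are present, so every error after the first is printed with its type, and the combined error keeps the first error's type (cloudProviderError) with the message:

out of resources ...and other concurrent errors: ["[internalError] cannot create node group", "[internalError] timeout"]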

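// Export converts the accumulated set back into a single ScaleUpStatus.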
func (c *combinedStatusSet) Export() *ScaleUpStatus {
    result := &ScaleUpStatus{Result: c.Result}
    if len(c.ScaleupErrors) > 0 {
        result.ScaleUpError = c.combineBatchScaleUpErrors()
    }
    if len(c.ScaleUpInfosSet) > 0 {
        for scaleUpInfo := range c.ScaleUpInfosSet {
            result.ScaleUpInfos = append(result.ScaleUpInfos, scaleUpInfo)
        }
    }
    if len(c.PodsTriggeredScaleUpSet) > 0 {
        for pod := range c.PodsTriggeredScaleUpSet {
            result.PodsTriggeredScaleUp = append(result.PodsTriggeredScaleUp, pod)
        }
    }
    if len(c.PodsRemainUnschedulableSet) > 0 {
        for pod := range c.PodsRemainUnschedulableSet {
            result.PodsRemainUnschedulable = append(result.PodsRemainUnschedulable, *pod)
        }
    }
    if len(c.PodsAwaitEvaluationSet) > 0 {
        for pod := range c.PodsAwaitEvaluationSet {
            result.PodsAwaitEvaluation = append(result.PodsAwaitEvaluation, pod)
        }
    }
    if len(c.CreateNodeGroupResultsSet) > 0 {
        for createNodeGroupResult := range c.CreateNodeGroupResultsSet {
            result.CreateNodeGroupResults = append(result.CreateNodeGroupResults, *createNodeGroupResult)
        }
    }
    if len(c.ConsideredNodeGroupsSet) > 0 {
        for nodeGroup := range c.ConsideredNodeGroupsSet {
            result.ConsideredNodeGroups = append(result.ConsideredNodeGroups, nodeGroup)
        }
    }
    if len(c.FailedCreationNodeGroupsSet) > 0 {
        for nodeGroup := range c.FailedCreationNodeGroupsSet {
            result.FailedCreationNodeGroups = append(result.FailedCreationNodeGroups, nodeGroup)
        }
    }
    if len(c.FailedResizeNodeGroupsSet) > 0 {
        for nodeGroup := range c.FailedResizeNodeGroupsSet {
            result.FailedResizeNodeGroups = append(result.FailedResizeNodeGroups, nodeGroup)
        }
    }
    return result
}

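// NewCombinedStatusSet returns a combinedStatusSet with all of its sets
// initialized. Note that Result is left at the zero value of ScaleUpResult,
// which Add can only ever lower; for the min-based combination to be
// meaningful, the starting Result should be the highest-valued
// (lowest-priority) ScaleUpResult.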
func NewCombinedStatusSet() combinedStatusSet {
    return combinedStatusSet{
        ScaleupErrors:               make(map[*errors.AutoscalerError]bool),
        ScaleUpInfosSet:             make(map[nodegroupset.ScaleUpInfo]bool),
        PodsTriggeredScaleUpSet:     make(map[*apiv1.Pod]bool),
        PodsRemainUnschedulableSet:  make(map[*NoScaleUpInfo]bool),
        PodsAwaitEvaluationSet:      make(map[*apiv1.Pod]bool),
        CreateNodeGroupResultsSet:   make(map[*nodegroups.CreateNodeGroupResult]bool),
        ConsideredNodeGroupsSet:     make(map[cloudprovider.NodeGroup]bool),
        FailedCreationNodeGroupsSet: make(map[cloudprovider.NodeGroup]bool),
        FailedResizeNodeGroupsSet:   make(map[cloudprovider.NodeGroup]bool),
    }
}
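
A minimal sketch of the intended batch flow inside this package, assuming the caller collects one ScaleUpStatus per ProvisioningRequest in the batch; combineStatuses is an illustrative helper, not part of this commit:

// combineStatuses folds per-request statuses into one deduplicated status.
func combineStatuses(batch []*ScaleUpStatus) *ScaleUpStatus {
    statusSet := NewCombinedStatusSet()
    for _, s := range batch {
        statusSet.Add(s)
    }
    return statusSet.Export()
}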
(Diff for the third changed file was not loaded.)
