
Commit 346399e

Merge pull request #13108 from fabriziopandini/bump-golangci-lint-v2.7.0

🌱 Bump golangci-lint v2.7.0

2 parents: 6a03872 + af6715a

10 files changed: +47 -19 lines

.github/workflows/pr-golangci-lint.yaml (1 addition, 1 deletion)

@@ -34,7 +34,7 @@ jobs:
       - name: golangci-lint
         uses: golangci/golangci-lint-action@e7fa5ac41e1cf5b7d48e45e42232ce7ada589601 # tag=v9.1.0
         with:
-          version: v2.4.0
+          version: v2.7.0
           working-directory: ${{matrix.working-directory}}
       - name: Lint API
         run: make lint-api

.golangci.yml (16 additions, 0 deletions)

@@ -407,6 +407,22 @@ linters:
         - staticcheck
       path: (.+)\.go$
       text: 'QF1008: could remove embedded field'
+    - linters:
+        - revive
+      path: errors/.*\.go$
+      text: 'var-naming: avoid package names that conflict with Go standard library package names'
+    - linters:
+        - revive
+      path: internal/util/hash/.*\.go$
+      text: 'var-naming: avoid package names that conflict with Go standard library package names'
+    - linters:
+        - revive
+      path: internal/controllers/topology/cluster/patches/api/.*\.go$
+      text: 'var-naming: avoid meaningless package names'
+    - linters:
+        - revive
+      path: test/infrastructure/inmemory/pkg/server/api/.*\.go$
+      text: 'var-naming: avoid meaningless package names'
 issues:
   max-issues-per-linter: 0
   max-same-issues: 0
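
The four new exclusions target revive's var-naming rule, which the revive version bundled with golangci-lint v2.7.0 apparently started applying to package names that shadow the standard library (errors, hash) or are considered meaningless (api). Rather than renaming long-standing packages, the affected paths are excluded. A minimal, hypothetical sketch of the kind of code the rule objects to (package and helper names invented for illustration):

// Package errors shadows the standard library "errors" package, which
// revive's var-naming rule reports.
package errors

import "fmt"

// Wrap is a hypothetical helper showing why such a package name can
// still be natural for its domain despite the collision.
func Wrap(err error, msg string) error {
	return fmt.Errorf("%s: %w", msg, err)
}

In practice callers import such a package under an alias (e.g. capierrors), so the collision is tolerable and the warning is suppressed per path instead of renaming the package.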

api/core/v1beta2/cluster_types.go (6 additions, 0 deletions)

@@ -86,17 +86,20 @@ const (

 	// ClusterTopologyReconciledControlPlaneUpgradePendingReason documents reconciliation of a Cluster topology
 	// not yet completed because Control Plane is not yet updated to match the desired topology spec.
+	//
 	// Deprecated: please use ClusterUpgrading instead.
 	ClusterTopologyReconciledControlPlaneUpgradePendingReason = "ControlPlaneUpgradePending"

 	// ClusterTopologyReconciledMachineDeploymentsCreatePendingReason documents reconciliation of a Cluster topology
 	// not yet completed because at least one of the MachineDeployments is yet to be created.
 	// This generally happens because new MachineDeployment creations are held off while the ControlPlane is not stable.
+	//
 	// Deprecated: please use ClusterUpgrading instead.
 	ClusterTopologyReconciledMachineDeploymentsCreatePendingReason = "MachineDeploymentsCreatePending"

 	// ClusterTopologyReconciledMachineDeploymentsUpgradePendingReason documents reconciliation of a Cluster topology
 	// not yet completed because at least one of the MachineDeployments is not yet updated to match the desired topology spec.
+	//
 	// Deprecated: please use ClusterUpgrading instead.
 	ClusterTopologyReconciledMachineDeploymentsUpgradePendingReason = "MachineDeploymentsUpgradePending"

@@ -106,12 +109,14 @@ const (

 	// ClusterTopologyReconciledMachinePoolsUpgradePendingReason documents reconciliation of a Cluster topology
 	// not yet completed because at least one of the MachinePools is not yet updated to match the desired topology spec.
+	//
 	// Deprecated: please use ClusterUpgrading instead.
 	ClusterTopologyReconciledMachinePoolsUpgradePendingReason = "MachinePoolsUpgradePending"

 	// ClusterTopologyReconciledMachinePoolsCreatePendingReason documents reconciliation of a Cluster topology
 	// not yet completed because at least one of the MachinePools is yet to be created.
 	// This generally happens because new MachinePool creations are held off while the ControlPlane is not stable.
+	//
 	// Deprecated: please use ClusterUpgrading instead.
 	ClusterTopologyReconciledMachinePoolsCreatePendingReason = "MachinePoolsCreatePending"

@@ -121,6 +126,7 @@ const (

 	// ClusterTopologyReconciledHookBlockingReason documents reconciliation of a Cluster topology
 	// not yet completed because at least one of the lifecycle hooks is blocking.
+	//
 	// Deprecated: please use ClusterUpgrading instead.
 	ClusterTopologyReconciledHookBlockingReason = "LifecycleHookBlocking"
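
The only change in this file (and in v1beta1_condition_consts.go below) is a blank // line before each Deprecated: notice. Go tooling (godoc, gopls, staticcheck's SA1019 check) only recognizes a deprecation when "Deprecated:" starts its own paragraph in the doc comment, which is presumably what the updated linters began enforcing here. A minimal sketch of the convention, with invented names:

package example

// OldReason documents some condition.
//
// Deprecated: use NewReason instead. The blank "//" line above makes
// this a separate paragraph, so godoc renders it as deprecated and
// staticcheck can warn on usages.
const OldReason = "Old"

// NewReason is the replacement.
const NewReason = "New"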

api/core/v1beta2/v1beta1_condition_consts.go (6 additions, 0 deletions)

@@ -306,17 +306,20 @@ const (

 	// TopologyReconciledControlPlaneUpgradePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology
 	// not yet completed because Control Plane is not yet updated to match the desired topology spec.
+	//
 	// Deprecated: please use ClusterUpgrading instead.
 	TopologyReconciledControlPlaneUpgradePendingV1Beta1Reason = "ControlPlaneUpgradePending"

 	// TopologyReconciledMachineDeploymentsCreatePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology
 	// not yet completed because at least one of the MachineDeployments is yet to be created.
 	// This generally happens because new MachineDeployment creations are held off while the ControlPlane is not stable.
+	//
 	// Deprecated: please use ClusterUpgrading instead.
 	TopologyReconciledMachineDeploymentsCreatePendingV1Beta1Reason = "MachineDeploymentsCreatePending"

 	// TopologyReconciledMachineDeploymentsUpgradePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology
 	// not yet completed because at least one of the MachineDeployments is not yet updated to match the desired topology spec.
+	//
 	// Deprecated: please use ClusterUpgrading instead.
 	TopologyReconciledMachineDeploymentsUpgradePendingV1Beta1Reason = "MachineDeploymentsUpgradePending"

@@ -326,12 +329,14 @@ const (

 	// TopologyReconciledMachinePoolsUpgradePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology
 	// not yet completed because at least one of the MachinePools is not yet updated to match the desired topology spec.
+	//
 	// Deprecated: please use ClusterUpgrading instead.
 	TopologyReconciledMachinePoolsUpgradePendingV1Beta1Reason = "MachinePoolsUpgradePending"

 	// TopologyReconciledMachinePoolsCreatePendingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology
 	// not yet completed because at least one of the MachinePools is yet to be created.
 	// This generally happens because new MachinePool creations are held off while the ControlPlane is not stable.
+	//
 	// Deprecated: please use ClusterUpgrading instead.
 	TopologyReconciledMachinePoolsCreatePendingV1Beta1Reason = "MachinePoolsCreatePending"

@@ -341,6 +346,7 @@ const (

 	// TopologyReconciledHookBlockingV1Beta1Reason (Severity=Info) documents reconciliation of a Cluster topology
 	// not yet completed because at least one of the lifecycle hooks is blocking.
+	//
 	// Deprecated: please use ClusterUpgrading instead.
 	TopologyReconciledHookBlockingV1Beta1Reason = "LifecycleHookBlocking"

hack/tools/.custom-gcl.yaml (1 addition, 1 deletion)

@@ -1,4 +1,4 @@
-version: v2.5.0
+version: v2.7.0
 name: golangci-lint-kube-api-linter
 destination: ./bin
 plugins:

internal/controllers/machinedeployment/machinedeployment_rollout_ondelete.go (2 additions, 2 deletions)

@@ -111,7 +111,7 @@ func (p *rolloutPlanner) reconcileOldMachineSetsOnDelete(ctx context.Context) {
 		scaleDownCount := max(ptr.Deref(oldMS.Spec.Replicas, 0)-ptr.Deref(oldMS.Status.Replicas, 0), 0)
 		if scaleDownCount > 0 {
 			newScaleIntent := max(ptr.Deref(oldMS.Spec.Replicas, 0)-scaleDownCount, 0)
-			p.addNote(oldMS, "scale down to align to existing Machines")
+			p.addNotef(oldMS, "scale down to align to existing Machines")
 			log.V(5).Info(fmt.Sprintf("Setting scale down intent for MachineSet %s to %d replicas (-%d)", oldMS.Name, newScaleIntent, scaleDownCount), "MachineSet", klog.KObj(oldMS))
 			p.scaleIntents[oldMS.Name] = newScaleIntent

@@ -139,7 +139,7 @@
 		scaleDownCount := min(scaleIntent, totalScaleDownCount)
 		if scaleDownCount > 0 {
 			newScaleIntent := max(ptr.Deref(oldMS.Spec.Replicas, 0)-scaleDownCount, 0)
-			p.addNote(oldMS, "scale down to align MachineSet spec.replicas to MachineDeployment spec.replicas")
+			p.addNotef(oldMS, "scale down to align MachineSet spec.replicas to MachineDeployment spec.replicas")
 			log.V(5).Info(fmt.Sprintf("Setting scale down intent for MachineSet %s to %d replicas (-%d)", oldMS.Name, newScaleIntent, scaleDownCount), "MachineSet", klog.KObj(oldMS))
 			p.scaleIntents[oldMS.Name] = newScaleIntent

internal/controllers/machinedeployment/machinedeployment_rollout_planner.go (2 additions, 2 deletions)

@@ -133,7 +133,7 @@ func (p *rolloutPlanner) init(ctx context.Context, md *clusterv1.MachineDeployme
 	// the new revision number is also surfaced in p.revision, and thus we are using this information
 	// to determine when to add this note.
 	if oldRevision != p.revision {
-		p.addNote(p.newMS, "this is now the current MachineSet")
+		p.addNotef(p.newMS, "this is now the current MachineSet")
 	}
 	return nil
 }

@@ -340,7 +340,7 @@ func computeDesiredMS(ctx context.Context, deployment *clusterv1.MachineDeployme
 	return desiredMS, nil
 }

-func (p *rolloutPlanner) addNote(ms *clusterv1.MachineSet, format string, a ...any) {
+func (p *rolloutPlanner) addNotef(ms *clusterv1.MachineSet, format string, a ...any) {
 	msg := fmt.Sprintf(format, a...)
 	for _, note := range p.notes[ms.Name] {
 		if note == msg {
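
The rename from addNote to addNotef, applied across the three machinedeployment files in this commit, brings the helper in line with the Go convention that functions taking a printf-style format string carry an "f" suffix, as enforced by golangci-lint's goprintffuncname check, which is presumably what the v2.7.0 bump started flagging here. A minimal, hypothetical sketch of the convention (notef is an invented stand-in):

package main

import "fmt"

// notef follows the goprintffuncname convention: a function whose
// trailing parameters are (format string, a ...any) should end in "f",
// mirroring fmt.Printf and fmt.Errorf.
func notef(format string, a ...any) string {
	return fmt.Sprintf(format, a...)
}

func main() {
	fmt.Println(notef("scale down MachineSet %s to %d replicas", "ms-1", 2))
}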

internal/controllers/machinedeployment/machinedeployment_rollout_rollingupdate.go (10 additions, 10 deletions)

@@ -146,7 +146,7 @@ func (p *rolloutPlanner) reconcileReplicasPendingAcknowledgeMove(ctx context.Con
 	replicaCount := ptr.Deref(p.newMS.Spec.Replicas, 0) + totNewAcknowledgeMoveReplicasToScaleUp
 	scaleUpCount := totNewAcknowledgeMoveReplicasToScaleUp
 	p.newMS.Spec.Replicas = ptr.To(replicaCount)
-	p.addNote(p.newMS, "acknowledge Machines %s moved from an old MachineSet", sortAndJoin(newAcknowledgeMoveReplicas.UnsortedList()))
+	p.addNotef(p.newMS, "acknowledge Machines %s moved from an old MachineSet", sortAndJoin(newAcknowledgeMoveReplicas.UnsortedList()))
 	log.V(5).Info(fmt.Sprintf("Acknowledge replicas %s moved from an old MachineSet. Scale up MachineSet %s to %d (+%d)", sortAndJoin(newAcknowledgeMoveReplicas.UnsortedList()), p.newMS.Name, replicaCount, scaleUpCount), "MachineSet", klog.KObj(p.newMS))
 }

@@ -187,7 +187,7 @@ func (p *rolloutPlanner) reconcileNewMachineSet(ctx context.Context) error {

 	if *(p.newMS.Spec.Replicas) > *(p.md.Spec.Replicas) {
 		// Scale down.
-		p.addNote(p.newMS, "scale down to align MachineSet spec.replicas to MachineDeployment spec.replicas")
+		p.addNotef(p.newMS, "scale down to align MachineSet spec.replicas to MachineDeployment spec.replicas")
 		log.V(5).Info(fmt.Sprintf("Setting scale down intent for MachineSet %s to %d replicas", p.newMS.Name, *(p.md.Spec.Replicas)), "MachineSet", klog.KObj(p.newMS))
 		p.scaleIntents[p.newMS.Name] = *(p.md.Spec.Replicas)
 		return nil

@@ -200,13 +200,13 @@

 	if newReplicasCount < *(p.newMS.Spec.Replicas) {
 		scaleDownCount := *(p.newMS.Spec.Replicas) - newReplicasCount
-		p.addNote(p.newMS, "%s", note)
+		p.addNotef(p.newMS, "%s", note)
 		log.V(5).Info(fmt.Sprintf("Setting scale down intent for MachineSet %s to %d replicas (-%d)", p.newMS.Name, newReplicasCount, scaleDownCount), "MachineSet", klog.KObj(p.newMS))
 		p.scaleIntents[p.newMS.Name] = newReplicasCount
 	}
 	if newReplicasCount > *(p.newMS.Spec.Replicas) {
 		scaleUpCount := newReplicasCount - *(p.newMS.Spec.Replicas)
-		p.addNote(p.newMS, "%s", note)
+		p.addNotef(p.newMS, "%s", note)
 		log.V(5).Info(fmt.Sprintf("Setting scale up intent for MachineSet %s to %d replicas (+%d)", p.newMS.Name, newReplicasCount, scaleUpCount), "MachineSet", klog.KObj(p.newMS))
 		p.scaleIntents[p.newMS.Name] = newReplicasCount
 	}

@@ -383,9 +383,9 @@ func (p *rolloutPlanner) scaleDownOldMSs(ctx context.Context, totalScaleDownCoun
 		// removing unavailable replicas when no available replicas exist on the MachineSet.
 		// Note. In both cases overall availability is not impacted.
 		if oldTotalAvailableReplicas == totalAvailableReplicas {
-			p.addNote(oldMS, "scale down to align to existing Machines or scale down by removing unavailable replicas (and no available replicas exist on the MachineSet)")
+			p.addNotef(oldMS, "scale down to align to existing Machines or scale down by removing unavailable replicas (and no available replicas exist on the MachineSet)")
 		} else {
-			p.addNote(oldMS, "%d available replicas > %d minimum available replicas", oldTotalAvailableReplicas, minAvailable)
+			p.addNotef(oldMS, "%d available replicas > %d minimum available replicas", oldTotalAvailableReplicas, minAvailable)
 		}
 		log.V(5).Info(fmt.Sprintf("Setting scale down intent for MachineSet %s to %d replicas (-%d)", oldMS.Name, newScaleIntent, scaleDown), "MachineSet", klog.KObj(oldMS))
 		p.scaleIntents[oldMS.Name] = newScaleIntent

@@ -455,7 +455,7 @@ func (p *rolloutPlanner) reconcileInPlaceUpdateIntent(ctx context.Context) error
 		if oldMS.Annotations == nil {
 			oldMS.Annotations = map[string]string{}
 		}
-		p.addNote(oldMS, "should scale down by moving Machines to MachineSet %s", p.newMS.Name)
+		p.addNotef(oldMS, "should scale down by moving Machines to MachineSet %s", p.newMS.Name)
 		oldMS.Annotations[clusterv1.MachineSetMoveMachinesToMachineSetAnnotation] = p.newMS.Name
 		inPlaceUpdateCandidates.Insert(oldMS.Name)
 	}

@@ -510,9 +510,9 @@ func (p *rolloutPlanner) reconcileInPlaceUpdateIntent(ctx context.Context) error
 	// through remediation before creating additional machines)
 	if newScaleUpCount == 0 && !p.scalingOrInPlaceUpdateInProgress(ctx) {
 		newScaleUpCount = 1
-		p.addNote(p.newMS, "surge 1 allowed to create availability for in-place updates")
+		p.addNotef(p.newMS, "surge 1 allowed to create availability for in-place updates")
 	} else {
-		p.addNote(p.newMS, "surge %d dropped to prioritize in-place updates", maxSurgeUsed)
+		p.addNotef(p.newMS, "surge %d dropped to prioritize in-place updates", maxSurgeUsed)
 	}

 	newScaleIntent := ptr.Deref(p.newMS.Spec.Replicas, 0) + newScaleUpCount

@@ -617,7 +617,7 @@ func (p *rolloutPlanner) reconcileDeadlockBreaker(ctx context.Context) {
 	}

 	newScaleIntent := max(ptr.Deref(oldMS.Spec.Replicas, 0)-1, 0)
-	p.addNote(p.newMS, "scaling down by 1 to unblock rollout stuck due to unavailable Machine on oldMS")
+	p.addNotef(p.newMS, "scaling down by 1 to unblock rollout stuck due to unavailable Machine on oldMS")
 	log.Info(fmt.Sprintf("Setting scale down intent for MachineSet %s to %d replicas (-%d) to unblock rollout stuck due to unavailable Machine on oldMS", oldMS.Name, newScaleIntent, 1), "MachineSet", klog.KObj(oldMS))
 	p.scaleIntents[oldMS.Name] = newScaleIntent
 	return

util/collections/machine_filters_test.go (2 additions, 2 deletions)

@@ -55,7 +55,7 @@ func TestAnd(t *testing.T) {
 	t.Run("returns true if both given machine filters return true", func(t *testing.T) {
 		g := NewWithT(t)
 		m := &clusterv1.Machine{}
-		g.Expect(collections.And(trueFilter, trueFilter)(m)).To(BeTrue())
+		g.Expect(collections.And(trueFilter, trueFilter)(m)).To(BeTrue()) //nolint:gocritic
 	})
 	t.Run("returns false if either given machine filter returns false", func(t *testing.T) {
 		g := NewWithT(t)

@@ -73,7 +73,7 @@ func TestOr(t *testing.T) {
 	t.Run("returns false if both given machine filter returns false", func(t *testing.T) {
 		g := NewWithT(t)
 		m := &clusterv1.Machine{}
-		g.Expect(collections.Or(falseFilter, falseFilter)(m)).To(BeFalse())
+		g.Expect(collections.Or(falseFilter, falseFilter)(m)).To(BeFalse()) //nolint:gocritic
 	})
 }
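
Passing the same filter twice to a variadic combinator is presumably the pattern gocritic's dupArg check treats as a likely copy-paste mistake, so the intentional duplication in these tests is now suppressed inline. A standalone, hypothetical sketch of the same situation (names invented):

package main

import "fmt"

type filter func(int) bool

// and returns a filter that passes only when every given filter passes.
func and(filters ...filter) filter {
	return func(v int) bool {
		for _, f := range filters {
			if !f(v) {
				return false
			}
		}
		return true
	}
}

func main() {
	positive := filter(func(v int) bool { return v > 0 })
	// The duplicated argument is deliberate (exercising the combinator
	// with identical inputs), so the dupArg warning is silenced inline.
	both := and(positive, positive) //nolint:gocritic
	fmt.Println(both(1), both(-1)) // true false
}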

util/version/version.go (1 addition, 1 deletion)

@@ -130,7 +130,7 @@ func (v buildIdentifiers) compare(o buildIdentifiers) int {
 	}

 	// if everything is equal till now the longer is greater
-	if i == len(v) && i == len(o) { //nolint: gocritic
+	if i == len(v) && i == len(o) {
 		return 0
 	} else if i == len(v) && i < len(o) {
 		return -1
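
Here a //nolint: gocritic directive is removed rather than added: the if/else-if chain it suppressed (most likely gocritic's ifElseChain check, which prefers a tagless switch) is apparently no longer reported under v2.7.0, and stale suppressions themselves get flagged once golangci-lint's nolintlint check is active. A hypothetical sketch of the switch form that ifElseChain would otherwise suggest (compareLen is an invented stand-in for the tail of this function):

package main

import "fmt"

// compareLen mirrors the tail of buildIdentifiers.compare as a tagless
// switch: when both identifier lists are exhausted they are equal,
// otherwise the longer one is greater.
func compareLen(i, lenV, lenO int) int {
	switch {
	case i == lenV && i == lenO:
		return 0
	case i == lenV && i < lenO:
		return -1
	default:
		return 1
	}
}

func main() {
	fmt.Println(compareLen(2, 2, 3)) // -1: v is shorter, so v < o
}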
