@@ -21,6 +21,7 @@ import (
 	"errors"
 	"fmt"
 	"testing"
+	"time"
 
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
@@ -49,7 +50,7 @@ func TestController(t *testing.T) {
 	claim := createClaim(claimName, claimNamespace, driverName)
 	otherClaim := createClaim(claimName, claimNamespace, otherDriverName)
 	podName := "pod"
-	podKey := "schedulingCtx:default/pod"
+	podSchedulingCtxKey := "schedulingCtx:default/pod"
 	pod := createPod(podName, claimNamespace, nil)
 	podClaimName := "my-pod-claim"
 	podSchedulingCtx := createPodSchedulingContexts(pod)
@@ -125,11 +126,15 @@ func TestController(t *testing.T) {
 		pod                                  *corev1.Pod
 		schedulingCtx, expectedSchedulingCtx *resourceapi.PodSchedulingContext
 		claim, expectedClaim                 *resourceapi.ResourceClaim
-		expectedError                        string
+		expectedWorkQueueState               Mock[string]
 	}{
 		"invalid-key": {
-			key:           "claim:x/y/z",
-			expectedError: `unexpected key format: "x/y/z"`,
+			key: "claim:x/y/z",
+			expectedWorkQueueState: Mock[string]{
+				Failures: map[string]int{
+					"claim:x/y/z": 1,
+				},
+			},
 		},
 		"not-found": {
 			key: "claim:default/claim",
@@ -154,7 +159,11 @@ func TestController(t *testing.T) {
 			claim:         withDeallocate(withAllocate(claim)),
 			driver:        m.expectDeallocate(map[string]error{claimName: errors.New("fake error")}),
 			expectedClaim: withDeallocate(withAllocate(claim)),
-			expectedError: "deallocate: fake error",
+			expectedWorkQueueState: Mock[string]{
+				Failures: map[string]int{
+					claimKey: 1,
+				},
+			},
 		},
 
 		// deletion time stamp set, our finalizer set, not allocated -> remove finalizer
@@ -170,7 +179,11 @@ func TestController(t *testing.T) {
 			claim:         withFinalizer(withDeletionTimestamp(claim), ourFinalizer),
 			driver:        m.expectDeallocate(map[string]error{claimName: errors.New("fake error")}),
 			expectedClaim: withFinalizer(withDeletionTimestamp(claim), ourFinalizer),
-			expectedError: "stop allocation: fake error",
+			expectedWorkQueueState: Mock[string]{
+				Failures: map[string]int{
+					claimKey: 1,
+				},
+			},
 		},
 		// deletion time stamp set, other finalizer set, not allocated -> do nothing
 		"deleted-finalizer-no-removal": {
@@ -191,7 +204,11 @@ func TestController(t *testing.T) {
 			claim:         withAllocate(withDeletionTimestamp(claim)),
 			driver:        m.expectDeallocate(map[string]error{claimName: errors.New("fake error")}),
 			expectedClaim: withAllocate(withDeletionTimestamp(claim)),
-			expectedError: "deallocate: fake error",
+			expectedWorkQueueState: Mock[string]{
+				Failures: map[string]int{
+					claimKey: 1,
+				},
+			},
 		},
 		// deletion time stamp set, finalizer not set -> do nothing
 		"deleted-no-finalizer": {
@@ -208,16 +225,23 @@ func TestController(t *testing.T) {
 
 		// pod with no claims -> shouldn't occur, check again anyway
 		"pod-nop": {
-			key:                   podKey,
+			key:                   podSchedulingCtxKey,
 			pod:                   pod,
 			schedulingCtx:         withSelectedNode(podSchedulingCtx),
 			expectedSchedulingCtx: withSelectedNode(podSchedulingCtx),
-			expectedError:         errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item:     podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 
 		// no potential nodes -> shouldn't occur
 		"no-nodes": {
-			key:           podKey,
+			key:           podSchedulingCtxKey,
 			claim:         claim,
 			expectedClaim: claim,
 			pod:           podWithClaim,
@@ -227,7 +251,7 @@ func TestController(t *testing.T) {
 
 		// potential nodes -> provide unsuitable nodes
 		"info": {
-			key:           podKey,
+			key:           podSchedulingCtxKey,
 			claim:         claim,
 			expectedClaim: claim,
 			pod:           podWithClaim,
@@ -236,12 +260,19 @@ func TestController(t *testing.T) {
 				expectClaimParameters(map[string]interface{}{claimName: 2}).
 				expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil),
 			expectedSchedulingCtx: withUnsuitableNodes(withPotentialNodes(podSchedulingCtx)),
-			expectedError:         errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item:     podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 
 		// potential nodes, selected node -> allocate
 		"allocate": {
-			key:           podKey,
+			key:           podSchedulingCtxKey,
 			claim:         claim,
 			expectedClaim: withReservedFor(withAllocate(claim), pod),
 			pod:           podWithClaim,
@@ -251,11 +282,18 @@ func TestController(t *testing.T) {
 				expectUnsuitableNodes(map[string][]string{podClaimName: unsuitableNodes}, nil).
 				expectAllocate(map[string]allocate{claimName: {allocResult: &allocation, selectedNode: nodeName, allocErr: nil}}),
 			expectedSchedulingCtx: withUnsuitableNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx))),
-			expectedError:         errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item:     "schedulingCtx:default/pod",
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 		// potential nodes, selected node, all unsuitable -> update unsuitable nodes
 		"is-potential-node": {
-			key:           podKey,
+			key:           podSchedulingCtxKey,
 			claim:         claim,
 			expectedClaim: claim,
 			pod:           podWithClaim,
@@ -264,11 +302,18 @@ func TestController(t *testing.T) {
 				expectClaimParameters(map[string]interface{}{claimName: 2}).
 				expectUnsuitableNodes(map[string][]string{podClaimName: potentialNodes}, nil),
 			expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withPotentialNodes(podSchedulingCtx)), potentialNodes),
-			expectedError:         errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item:     podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 		// max potential nodes, other selected node, all unsuitable -> update unsuitable nodes with truncation at start
 		"is-potential-node-truncate-first": {
-			key:           podKey,
+			key:           podSchedulingCtxKey,
 			claim:         claim,
 			expectedClaim: claim,
 			pod:           podWithClaim,
@@ -277,11 +322,18 @@ func TestController(t *testing.T) {
 				expectClaimParameters(map[string]interface{}{claimName: 2}).
 				expectUnsuitableNodes(map[string][]string{podClaimName: append(maxNodes, nodeName)}, nil),
 			expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withSpecificPotentialNodes(podSchedulingCtx, maxNodes)), append(maxNodes[1:], nodeName)),
-			expectedError:         errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item:     podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 		// max potential nodes, other selected node, all unsuitable (but in reverse order) -> update unsuitable nodes with truncation at end
 		"pod-selected-is-potential-node-truncate-last": {
-			key:           podKey,
+			key:           podSchedulingCtxKey,
 			claim:         claim,
 			expectedClaim: claim,
 			pod:           podWithClaim,
@@ -290,7 +342,14 @@ func TestController(t *testing.T) {
 				expectClaimParameters(map[string]interface{}{claimName: 2}).
 				expectUnsuitableNodes(map[string][]string{podClaimName: append([]string{nodeName}, maxNodes...)}, nil),
 			expectedSchedulingCtx: withSpecificUnsuitableNodes(withSelectedNode(withSpecificPotentialNodes(podSchedulingCtx, maxNodes)), append([]string{nodeName}, maxNodes[:len(maxNodes)-1]...)),
-			expectedError:         errPeriodic.Error(),
+			expectedWorkQueueState: Mock[string]{
+				Later: []MockDelayedItem[string]{
+					{
+						Item:     podSchedulingCtxKey,
+						Duration: time.Second * 30,
+					},
+				},
+			},
 		},
 	} {
 		t.Run(name, func(t *testing.T) {
@@ -340,16 +399,11 @@ func TestController(t *testing.T) {
 			) {
 				t.Fatal("could not sync caches")
 			}
-			_, err := ctrl.(*controller).syncKey(ctx, test.key)
-			if err != nil && test.expectedError == "" {
-				t.Fatalf("unexpected error: %v", err)
-			}
-			if err == nil && test.expectedError != "" {
-				t.Fatalf("did not get expected error %q", test.expectedError)
-			}
-			if err != nil && err.Error() != test.expectedError {
-				t.Fatalf("expected error %q, got %q", test.expectedError, err.Error())
-			}
+			var workQueueState Mock[string]
+			c := ctrl.(*controller)
+			workQueueState.SyncOne(test.key, c.sync)
+			assert.Equal(t, test.expectedWorkQueueState, workQueueState)
+
 			claims, err := kubeClient.ResourceV1alpha3().ResourceClaims("").List(ctx, metav1.ListOptions{})
 			require.NoError(t, err, "list claims")
 			var expectedClaims []resourceapi.ResourceClaim
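
Note on the new assertion style: instead of matching error strings, each test case now describes the state the mock work queue should be left in after syncing one key, either a per-key failure count or a delayed retry. The sketch below illustrates how such a mock might be shaped. Only the names Mock, MockDelayedItem, Failures, Later, Item, Duration, and SyncOne are taken from the diff; the sync callback signature (a requeue delay plus an error), the package layout, and the example keys are assumptions for illustration, not the actual helper used in the Kubernetes tree.

// Illustrative sketch only; see the note above for which names are assumed.
package main

import (
	"errors"
	"fmt"
	"time"
)

// MockDelayedItem records an item that the sync function asked to revisit later.
type MockDelayedItem[T comparable] struct {
	Item     T
	Duration time.Duration
}

// Mock captures what a controller did with its work queue while syncing items:
// how often each key failed and which keys were re-queued with a delay.
type Mock[T comparable] struct {
	Failures map[T]int
	Later    []MockDelayedItem[T]
}

// SyncOne runs sync for a single item and records the outcome instead of
// re-queueing it on a real rate-limited work queue.
func (m *Mock[T]) SyncOne(item T, sync func(item T) (time.Duration, error)) {
	delay, err := sync(item)
	switch {
	case err != nil:
		if m.Failures == nil {
			m.Failures = map[T]int{}
		}
		m.Failures[item]++
	case delay > 0:
		m.Later = append(m.Later, MockDelayedItem[T]{Item: item, Duration: delay})
	}
}

func main() {
	var state Mock[string]
	// A key that fails ends up counted in Failures.
	state.SyncOne("claim:x/y/z", func(string) (time.Duration, error) {
		return 0, errors.New("unexpected key format")
	})
	// A key that wants a periodic recheck ends up in Later with its delay,
	// which replaces the old errPeriodic sentinel error.
	state.SyncOne("schedulingCtx:default/pod", func(string) (time.Duration, error) {
		return 30 * time.Second, nil
	})
	fmt.Printf("failures=%v later=%v\n", state.Failures, state.Later)
}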