@@ -374,12 +374,12 @@ describe('Time Profiler', () => {

     const testStart = hrtime.bigint();
     const testDurationNanos = PROFILE_OPTIONS.durationMillis * 1_000_000;
+    const intervalNanos = PROFILE_OPTIONS.intervalMicros * 1_000;
     setTimeout(loop, 0);

     function loop() {
-      const loopDurationNanos = PROFILE_OPTIONS.intervalMicros * 1_000;
       const loopStart = hrtime.bigint();
-      while (hrtime.bigint() - loopStart < loopDurationNanos);
+      while (hrtime.bigint() - loopStart < intervalNanos);
       if (hrtime.bigint() - testStart < testDurationNanos) {
         setTimeout(loop, 0);
       } else {
@@ -389,14 +389,45 @@ describe('Time Profiler', () => {

     await done;

-    let asyncIdObserved = false;
+    const asyncIdObserved = new Set();
+    let sampleCount = 0;
+    let sampleCountWithAsyncID = 0;
     time.stop(false, ({context}: GenerateTimeLabelsArgs) => {
-      if (!asyncIdObserved && typeof context?.asyncId === 'number') {
-        asyncIdObserved = context?.asyncId !== -1;
+      if (typeof context?.asyncId === 'number') {
+        asyncIdObserved.add(context.asyncId);
+        sampleCountWithAsyncID++;
       }
+      sampleCount++;
       return {};
     });
-    assert(asyncIdObserved, 'Async ID was not observed');
+
+    // Below we set certain thresholds for test success. Note that in the extreme, the test could
+    // succeed even if we observed a single async ID with a value other than -1. We are setting a
+    // somewhat higher bar, while still keeping it low enough to avoid flakiness.
+
+    const theoreticalSampleCount = testDurationNanos / intervalNanos;
+    // In local tests on my Mac I get around 90% of the theoretical number. Still, we'll accept
+    // anything more than a quarter of the theoretical count so we don't flake out when tested on
+    // a busy machine.
+    const expectedSampleCount = theoreticalSampleCount / 4;
+    assert(
+      sampleCount >= expectedSampleCount,
+      `Observed ${sampleCount} samples, expected at least ${expectedSampleCount}`
+    );
+    // We'd expect at least 1/12 of samples to have async IDs. This is a very low number, but
+    // anything higher makes ASAN tests flaky.
+    const expectedSamplesWithAsyncID = sampleCount / 12;
+    assert(
+      sampleCountWithAsyncID >= expectedSamplesWithAsyncID,
+      `Observed ${sampleCountWithAsyncID} samples with async IDs, expected ${expectedSamplesWithAsyncID}`
+    );
+    // Similarly, we'd expect the number of unique async IDs to be at least 60% of all observed
+    // samples with async IDs.
+    const expectedOccurrences = (sampleCountWithAsyncID * 3) / 5;
+    assert(
+      asyncIdObserved.size >= expectedOccurrences,
+      `Observed ${asyncIdObserved.size} distinct async IDs, expected at least ${expectedOccurrences}`
+    );
   });

   describe('profile (w/ stubs)', () => {
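
For reference, a minimal sketch of how the thresholds above scale. The option values used here (durationMillis = 500, intervalMicros = 1_000) are placeholders chosen only to make the arithmetic concrete; they are not the values this test's PROFILE_OPTIONS actually uses.

// Hypothetical profiler options, assumed only for this arithmetic sketch.
const durationMillis = 500;
const intervalMicros = 1_000;

// Same derivation as in the test above.
const testDurationNanos = durationMillis * 1_000_000; // 500_000_000 ns of profiling
const intervalNanos = intervalMicros * 1_000;         // 1_000_000 ns between samples

// Best case: one sample per sampling interval over the whole test duration.
const theoreticalSampleCount = testDurationNanos / intervalNanos; // 500
const expectedSampleCount = theoreticalSampleCount / 4;           // accept >= 125 samples

// If, say, 450 samples were actually observed:
const sampleCount = 450;
const expectedSamplesWithAsyncID = sampleCount / 12; // accept >= 37.5 samples with async IDs

console.log({theoreticalSampleCount, expectedSampleCount, expectedSamplesWithAsyncID});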