@@ -67,17 +67,14 @@ use core::nonzero::NonZero;
 pub struct RawTable<K, V> {
     capacity: usize,
     size: usize,
-    hashes: Unique<Option<SafeHash>>,
-    // Because K/V do not appear directly in any of the types in the struct,
-    // inform rustc that in fact instances of K and V are reachable from here.
-    marker: marker::PhantomData<(K, V)>,
+    // NB. The table will probably need manual impls of Send and Sync if this
+    // field ever changes.
+    middle: Unique<(K, V)>,
 }
 
 struct RawBucket<K, V> {
     hash: *mut Option<SafeHash>,
-    key: *mut K,
-    val: *mut V,
-    _marker: marker::PhantomData<(K, V)>,
+    kval: *mut (K, V),
 }
 
 impl<K, V> Copy for RawBucket<K, V> {}
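The struct change above implies a single allocation that holds every `(K, V)` pair followed by every hash slot, with `middle` pointing at the boundary between the two arrays. The following standalone sketch is not part of the commit; it uses plain `std::alloc` and a `u64` stand-in for `Option<SafeHash>` to illustrate the pointer arithmetic that `RawBucket` relies on: hashes start at `middle`, and the pair array sits `capacity` elements before it.

```rust
use std::alloc::{alloc, dealloc, Layout};
use std::mem;

fn main() {
    type K = u32;
    type V = u64;
    let capacity: usize = 8;

    // One buffer: `capacity` (K, V) pairs, then `capacity` hash slots
    // (a u64 per slot here, standing in for Option<SafeHash>).
    let pair_bytes = mem::size_of::<(K, V)>() * capacity;
    let hash_bytes = mem::size_of::<u64>() * capacity;
    let align = mem::align_of::<(K, V)>().max(mem::align_of::<u64>());
    let layout = Layout::from_size_align(pair_bytes + hash_bytes, align).unwrap();

    unsafe {
        let base = alloc(layout) as *mut (K, V);
        assert!(!base.is_null());

        // `middle` is one-past-the-end of the pair array ...
        let middle = base.add(capacity);
        // ... so the hash array begins exactly at `middle`,
        let first_hash = middle as *mut u64;
        // and the first pair sits `capacity` pairs before `middle`,
        // which is how `first_bucket_raw` recovers `kval` later in the diff.
        let first_kval = middle.sub(capacity);
        assert_eq!(first_kval, base);

        println!("pairs at {:p}, hashes at {:p}", first_kval, first_hash);
        dealloc(base as *mut u8, layout);
    }
}
```

The table is still one allocation, as before, but a `RawBucket` now carries only two pointers (`hash`, `kval`) instead of three plus a `PhantomData` marker.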
@@ -191,9 +188,7 @@ impl<K, V> RawBucket<K, V> {
     unsafe fn offset(self, count: isize) -> RawBucket<K, V> {
         RawBucket {
             hash: self.hash.offset(count),
-            key: self.key.offset(count),
-            val: self.val.offset(count),
-            _marker: marker::PhantomData,
+            kval: self.kval.offset(count),
         }
     }
 }
@@ -221,6 +216,7 @@ impl<K, V> Put for RawTable<K, V> {}
 impl<'t, K, V> Put for &'t mut RawTable<K, V> {}
 impl<K, V, M: Put> Put for Bucket<K, V, M> {}
 impl<K, V, M: Put> Put for FullBucket<K, V, M> {}
+
 // Buckets hold references to the table.
 impl<K, V, M, S> Bucket<K, V, M, S> {
     /// Borrow a reference to the table.
@@ -264,7 +260,6 @@ impl<K, V, M> Bucket<K, V, M> where M: Borrow<RawTable<K, V>> {
             capacity: capacity,
             table: table,
         };
-
         if capacity == 0 {
             Err(bucket.unsafe_cast())
         } else {
@@ -369,8 +364,7 @@ impl<K, V, M> EmptyBucket<K, V, M> where M: Borrow<RawTable<K, V>>, M: Put {
                -> FullBucket<K, V, M> {
         unsafe {
             *self.raw.hash = Some(hash);
-            ptr::write(self.0.raw.key, key);
-            ptr::write(self.0.raw.val, value);
+            ptr::write(self.raw.kval, (key, value));
         }
 
         self.table.size += 1;
@@ -394,56 +388,49 @@ impl<'t, K, V, M: 't> FullBucket<K, V, M> where M: Borrow<RawTable<K, V>> {
 
     /// Gets references to the key and value at a given index.
     pub fn read(&self) -> (&SafeHash, &K, &V) {
-        unsafe {
-            (&*(self.0.raw.hash as *mut SafeHash),
-             &*self.0.raw.key,
-             &*self.0.raw.val)
-        }
-    }
-}
-
-impl<K, V, M> FullBucket<K, V, M> where M: Borrow<RawTable<K, V>> {
-    /// Removes this bucket's key and value from the hashtable.
-    ///
-    /// This works similarly to `put`, building an `EmptyBucket` out of the
-    /// taken bucket.
-    pub fn take(mut self) -> (EmptyBucket<K, V, M>, K, V) {
-        self.0.table.size -= 1;
-
-        unsafe {
-            *self.0.raw.hash = None;
-            let k = ptr::read(self.0.raw.key);
-            let v = ptr::read(self.0.raw.val);
-            (Bucket(self.0), k, v)
-        }
-    }
-
-    /// Gets mutable references to the key and value at a given index.
-    pub fn read_mut(&mut self) -> (&mut SafeHash, &mut K, &mut V) {
-        unsafe {
-            (&mut *(self.0.raw.hash as *mut SafeHash),
-             &mut *self.0.raw.key,
-             &mut *self.0.raw.val)
-        }
+        let (&ref h, &(ref k, ref v)) = unsafe {
+            (&*(self.raw.hash as *mut SafeHash), &*self.raw.kval)
+        };
+        (h, k, v)
     }
-}
 
-impl<'t, K, V, M: 't> FullBucket<K, V, M> where M: Borrow<RawTable<K, V>> {
     /// Exchange a bucket state for immutable references into the table.
     /// Because the underlying reference to the table is also consumed,
     /// no further changes to the structure of the table are possible;
     /// in exchange for this, the returned references have a longer lifetime
     /// than the references returned by `read()`.
     pub fn into_refs(self) -> (&'t K, &'t V) {
-        self.0.raw.into_refs()
+        unsafe { (&(*self.raw.kval).0, &(*self.raw.kval).1) }
     }
 }
 
-impl<'t, K, V, M: 't> FullBucket<K, V, M> where M: Borrow<RawTable<K, V>> {
+impl<'t, K, V, M: 't> FullBucket<K, V, M> where M: BorrowMut<RawTable<K, V>> {
+    /// Gets mutable references to the key and value at a given index.
+    pub fn read_mut(&mut self) -> (&mut SafeHash, &mut K, &mut V) {
+        let (&mut ref mut h, &mut (ref mut k, ref mut v)) = unsafe {
+            (&mut *(self.raw.hash as *mut SafeHash), &mut *self.raw.kval)
+        };
+        (h, k, v)
+    }
+
     /// This works similarly to `into_refs`, exchanging a bucket state
     /// for mutable references into the table.
     pub fn into_mut_refs(self) -> (&'t mut K, &'t mut V) {
-        self.0.raw.into_mut_refs()
+        unsafe { (&mut (*self.raw.kval).0, &mut (*self.raw.kval).1) }
+    }
+
+    /// Removes this bucket's key and value from the hashtable.
+    ///
+    /// This works similarly to `put`, building an `EmptyBucket` out of the
+    /// taken bucket.
+    pub fn take(mut self) -> (EmptyBucket<K, V, M>, K, V) {
+        self.table.size -= 1;
+
+        unsafe {
+            *self.raw.hash = None;
+            let (k, v) = ptr::read(self.raw.kval);
+            (self.unsafe_cast(), k, v)
+        }
     }
 }
 
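The rewritten `read` and `read_mut` above dereference a single `*mut (K, V)` and then split the borrow with a tuple pattern. A small safe analog (illustrative only, not the std internals) of turning one reference to a pair into a reference per field:

```rust
// Split one borrow of a pair into independent borrows of its fields,
// mirroring the `&(ref k, ref v)` pattern used by `read()` above.
fn split<K, V>(pair: &(K, V)) -> (&K, &V) {
    let &(ref k, ref v) = pair;
    (k, v)
}

fn main() {
    let pair = (String::from("key"), 42u32);
    let (k, v) = split(&pair);
    assert_eq!(k, "key");
    assert_eq!(*v, 42);
}
```

The `&mut (ref mut k, ref mut v)` form in `read_mut` is the same idea applied to a mutable borrow.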
@@ -456,8 +443,7 @@ impl<K, V, M> GapThenFull<K, V, M> where M: Borrow<RawTable<K, V>> {
     pub fn shift(mut self) -> Option<GapThenFull<K, V, M>> {
         unsafe {
             *self.gap.raw.hash = mem::replace(&mut *self.full.raw.hash, None);
-            copy_nonoverlapping_memory(self.gap.0.raw.key, self.full.0.raw.key, 1);
-            copy_nonoverlapping_memory(self.gap.0.raw.val, self.full.0.raw.val, 1);
+            copy_nonoverlapping_memory(self.gap.raw.kval, self.full.raw.kval, 1);
         }
 
         let Bucket { raw: prev_raw, idx: prev_idx, .. } = self.full;
@@ -476,117 +462,34 @@ impl<K, V, M> GapThenFull<K, V, M> where M: Borrow<RawTable<K, V>> {
     }
 }
 
-/// Rounds up to a multiple of a power of two. Returns the closest multiple
-/// of `target_alignment` that is higher or equal to `unrounded`.
-///
-/// # Panics
-///
-/// Panics if `target_alignment` is not a power of two.
-fn round_up_to_next(unrounded: usize, target_alignment: usize) -> usize {
-    assert!(target_alignment.is_power_of_two());
-    (unrounded + target_alignment - 1) & !(target_alignment - 1)
-}
-
-#[test]
-fn test_rounding() {
-    assert_eq!(round_up_to_next(0, 4), 0);
-    assert_eq!(round_up_to_next(1, 4), 4);
-    assert_eq!(round_up_to_next(2, 4), 4);
-    assert_eq!(round_up_to_next(3, 4), 4);
-    assert_eq!(round_up_to_next(4, 4), 4);
-    assert_eq!(round_up_to_next(5, 4), 8);
-}
-
-// Returns a tuple of (key_offset, val_offset),
-// from the start of a mallocated array.
-fn calculate_offsets(hashes_size: usize,
-                     keys_size: usize, keys_align: usize,
-                     vals_align: usize)
-                     -> (usize, usize) {
-    let keys_offset = round_up_to_next(hashes_size, keys_align);
-    let end_of_keys = keys_offset + keys_size;
-
-    let vals_offset = round_up_to_next(end_of_keys, vals_align);
-
-    (keys_offset, vals_offset)
-}
-
-// Returns a tuple of (minimum required malloc alignment, hash_offset,
-// array_size), from the start of a mallocated array.
-fn calculate_allocation(hash_size: usize, hash_align: usize,
-                        keys_size: usize, keys_align: usize,
-                        vals_size: usize, vals_align: usize)
-                        -> (usize, usize, usize) {
-    let hash_offset = 0;
-    let (_, vals_offset) = calculate_offsets(hash_size,
-                                             keys_size, keys_align,
-                                             vals_align);
-    let end_of_vals = vals_offset + vals_size;
-
-    let min_align = cmp::max(hash_align, cmp::max(keys_align, vals_align));
-
-    (min_align, hash_offset, end_of_vals)
-}
-
-#[test]
-fn test_offset_calculation() {
-    assert_eq!(calculate_allocation(128, 8, 15, 1, 4, 4), (8, 0, 148));
-    assert_eq!(calculate_allocation(3, 1, 2, 1, 1, 1), (1, 0, 6));
-    assert_eq!(calculate_allocation(6, 2, 12, 4, 24, 8), (8, 0, 48));
-    assert_eq!(calculate_offsets(128, 15, 1, 4), (128, 144));
-    assert_eq!(calculate_offsets(3, 2, 1, 1), (3, 5));
-    assert_eq!(calculate_offsets(6, 12, 4, 8), (8, 24));
-}
-
 impl<K, V> RawTable<K, V> {
     /// Does not initialize the buckets.
-    unsafe fn new_uninitialized(capacity: uint) -> RawTable<K, V> {
-        if capacity == 0 {
-            return RawTable {
-                size: 0,
-                capacity: 0,
-                hashes: Unique::new(EMPTY as *mut u64),
-                marker: marker::PhantomData,
+    pub fn new_uninitialized(capacity: uint) -> PartialRawTable<K, V> {
+        unsafe {
+            let table = if capacity == 0 {
+                RawTable {
+                    size: 0,
+                    capacity: 0,
+                    middle: Unique::new(EMPTY as *mut _),
+                }
+            } else {
+                let hashes = allocate(checked_size_generic::<K, V>(capacity), align::<K, V>());
+                if hashes.is_null() { ::alloc::oom() }
+
+                RawTable {
+                    capacity: capacity,
+                    size: 0,
+                    middle: Unique((hashes as *mut (K, V)).offset(capacity as isize)),
+                }
            };
-        }
 
-        // No need for `checked_mul` before a more restrictive check performed
-        // later in this method.
-        let hashes_size = capacity * size_of::<Option<SafeHash>>();
-        let keys_size = capacity * size_of::<K>();
-        let vals_size = capacity * size_of::<V>();
-
-        // Allocating hashmaps is a little tricky. We need to allocate three
-        // arrays, but since we know their sizes and alignments up front,
-        // we just allocate a single array, and then have the subarrays
-        // point into it.
-        //
-        // This is great in theory, but in practice getting the alignment
-        // right is a little subtle. Therefore, calculating offsets has been
-        // factored out into a different function.
-        let (malloc_alignment, hash_offset, size) =
-            calculate_allocation(
-                hashes_size, min_align_of::<Option<SafeHash>>(),
-                keys_size, min_align_of::<K>(),
-                vals_size, min_align_of::<V>());
-
-        // One check for overflow that covers calculation and rounding of size.
-        let size_of_bucket = size_of::<Option<SafeHash>>().checked_add(size_of::<K>()).unwrap()
-                                                          .checked_add(size_of::<V>()).unwrap();
-        assert!(size >= capacity.checked_mul(size_of_bucket)
-                                .expect("capacity overflow"),
-                "capacity overflow");
-
-        let buffer = allocate(size, malloc_alignment);
-        if buffer.is_null() { ::alloc::oom() }
-
-        let hashes = buffer.offset(hash_offset as isize) as *mut Option<SafeHash>;
-
-        RawTable {
-            capacity: capacity,
-            size: 0,
-            hashes: Unique::new(hashes),
-            marker: marker::PhantomData,
+            PartialRawTable {
+                front: table.first_bucket_raw(),
+                back: table.first_bucket_raw(),
+                front_num: 0,
+                back_num: capacity,
+                table: table,
+            }
         }
     }
 
@@ -601,23 +504,12 @@ impl<K, V> RawTable<K, V> {
         if self.capacity() == 0 {
             RawBucket {
                 hash: ptr::null_mut(),
-                key: ptr::null_mut(),
-                val: ptr::null_mut(),
+                kval: ptr::null_mut(),
             }
         } else {
-            let hashes_size = self.capacity * size_of::<Option<SafeHash>>();
-            let keys_size = self.capacity * size_of::<K>();
-
-            let buffer = *self.hashes as *mut u8;
-            let (keys_offset, vals_offset) = calculate_offsets(hashes_size,
-                                                               keys_size, min_align_of::<K>(),
-                                                               min_align_of::<V>());
-
             RawBucket {
-                hash: *self.middle as *mut Option<SafeHash>,
-                key: buffer.offset(keys_offset as isize) as *mut K,
-                val: buffer.offset(vals_offset as isize) as *mut V,
-                _marker: marker::PhantomData,
+                hash: self.middle.ptr as *mut Option<SafeHash>,
+                kval: self.middle.ptr.offset(-(self.capacity as isize)),
             }
         }
     }
@@ -632,14 +524,61 @@ impl<K, V> RawTable<K, V> {
     pub fn size(&self) -> usize {
         self.size
     }
+}
 
+/// Rounds up to a multiple of a power of two. Returns the closest multiple
+/// of `target_alignment` that is higher or equal to `unrounded`.
+///
+/// # Panics
+///
+/// Panics if `target_alignment` is not a power of two.
+fn round_up_to_next(unrounded: uint, target_alignment: uint) -> uint {
+    assert!(target_alignment.is_power_of_two());
+    (unrounded + target_alignment - 1) & !(target_alignment - 1)
 }
 
+#[test]
+fn test_rounding() {
+    assert_eq!(round_up_to_next(0, 4), 0);
+    assert_eq!(round_up_to_next(1, 4), 4);
+    assert_eq!(round_up_to_next(2, 4), 4);
+    assert_eq!(round_up_to_next(3, 4), 4);
+    assert_eq!(round_up_to_next(4, 4), 4);
+    assert_eq!(round_up_to_next(5, 4), 8);
+    assert_eq!(round_up_to_next(5, 8), 8);
+}
 
+#[inline]
+fn size_generic<K, V>(capacity: usize) -> usize {
+    let hash_align = min_align_of::<Option<SafeHash>>();
+    round_up_to_next(size_of::<(K, V)>() * capacity, hash_align) + size_of::<SafeHash>() * capacity
+}
 
+fn checked_size_generic<K, V>(capacity: usize) -> usize {
+    let size = size_generic::<K, V>(capacity);
+    let elem_size = size_of::<(K, V)>() + size_of::<SafeHash>();
+    assert!(size >= capacity.checked_mul(elem_size).expect("capacity overflow"),
+            "capacity overflow");
+    size
+}
+
+#[inline]
+fn align<K, V>() -> usize {
+    cmp::max(mem::min_align_of::<(K, V)>(), mem::min_align_of::<u64>())
+}
+
+/// A newtyped RawBucket. Not copyable.
+pub struct RawFullBucket<K, V, M>(RawBucket<K, V>);
+
+impl<'t, K, V, M: 't> RawFullBucket<K, V, M> where RawTable<K, V>: BorrowFrom<M> {
+    pub fn into_refs(self) -> (&'t K, &'t V) {
+        unsafe { (&(*self.0.kval).0, &(*self.0.kval).1) }
     }
 }
 
+impl<'t, K, V, M: 't> RawFullBucket<K, V, M> where RawTable<K, V>: BorrowFromMut<M> {
+    pub fn into_mut_refs(self) -> (&'t mut K, &'t mut V) {
+        unsafe { (&mut (*self.0.kval).0, &mut (*self.0.kval).1) }
     }
 }
 
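The added `size_generic`/`align` helpers compute the single allocation as: the pair array's bytes rounded up to the hash slot's alignment, plus one hash slot per bucket. A standalone sketch of that arithmetic follows (illustrative only; it assumes the hash slot is an 8-byte, 8-aligned `u64`, which is what `SafeHash` wraps):

```rust
use std::cmp;
use std::mem;

// Round `unrounded` up to the next multiple of `target_alignment`
// (which must be a power of two) -- same formula as in the diff.
fn round_up_to_next(unrounded: usize, target_alignment: usize) -> usize {
    assert!(target_alignment.is_power_of_two());
    (unrounded + target_alignment - 1) & !(target_alignment - 1)
}

// Sketch of `size_generic`: all (K, V) pairs first, then one 8-byte hash
// slot per bucket (assuming the hash slot is an 8-byte, 8-aligned u64).
fn buffer_size<K, V>(capacity: usize) -> usize {
    let hash_size = mem::size_of::<u64>();
    let hash_align = mem::align_of::<u64>();
    round_up_to_next(mem::size_of::<(K, V)>() * capacity, hash_align) + hash_size * capacity
}

// Sketch of `align`: the whole buffer needs the stricter of the two alignments.
fn buffer_align<K, V>() -> usize {
    cmp::max(mem::align_of::<(K, V)>(), mem::align_of::<u64>())
}

fn main() {
    // Worked example: 4 buckets of (u8, u8) pairs.
    // Pairs take 4 * 2 = 8 bytes (already 8-aligned), hashes take 4 * 8 = 32.
    assert_eq!(buffer_size::<u8, u8>(4), 8 + 32);
    assert_eq!(buffer_align::<u8, u8>(), 8);
}
```

The separate `checked_size_generic` wrapper in the diff then guards these unchecked multiplications by comparing the result against a `checked_mul` of the per-bucket size, panicking on capacity overflow.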