@@ -420,11 +420,93 @@ again:
 
 	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
 	// store new key at insert position
-	if sys.PtrSize == 4 && t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
-		writebarrierptr((*uintptr)(insertk), uintptr(key))
-	} else {
-		*(*uint32)(insertk) = key
+	*(*uint32)(insertk) = key
+
+	h.count++
+
+done:
+	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*4+inserti*uintptr(t.valuesize))
+	if h.flags&hashWriting == 0 {
+		throw("concurrent map writes")
+	}
+	h.flags &^= hashWriting
+	return val
+}
+
+func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+	if h == nil {
+		panic(plainError("assignment to entry in nil map"))
+	}
+	if raceenabled {
+		callerpc := getcallerpc()
+		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
+	}
+	if h.flags&hashWriting != 0 {
+		throw("concurrent map writes")
 	}
+	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	h.flags |= hashWriting
+
+	if h.buckets == nil {
+		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+	}
+
+again:
+	bucket := hash & bucketMask(h.B)
+	if h.growing() {
+		growWork_fast32(t, h, bucket)
+	}
+	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+
+	var insertb *bmap
+	var inserti uintptr
+	var insertk unsafe.Pointer
+
+	for {
+		for i := uintptr(0); i < bucketCnt; i++ {
+			if b.tophash[i] == empty {
+				if insertb == nil {
+					inserti = i
+					insertb = b
+				}
+				continue
+			}
+			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
+			if k != key {
+				continue
+			}
+			inserti = i
+			insertb = b
+			goto done
+		}
+		ovf := b.overflow(t)
+		if ovf == nil {
+			break
+		}
+		b = ovf
+	}
+
+	// Did not find mapping for key. Allocate new cell & add entry.
+
+	// If we hit the max load factor or we have too many overflow buckets,
+	// and we're not already in the middle of growing, start growing.
+	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+		hashGrow(t, h)
+		goto again // Growing the table invalidates everything, so try again
+	}
+
+	if insertb == nil {
+		// all current buckets are full, allocate a new one.
+		insertb = h.newoverflow(t, b)
+		inserti = 0 // not necessary, but avoids needlessly spilling inserti
+	}
+	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*4)
+	// store new key at insert position
+	*(*unsafe.Pointer)(insertk) = key
 
 	h.count++
 
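
Note (not part of the diff): this hunk splits the 32-bit fast path in two, so `mapassign_fast32` can store its key with a plain `uint32` write and the new `mapassign_fast32ptr` handles pointer-shaped keys. As a hedged sketch of the kind of call sites involved, the routing below is an assumption about the compiler's usual fast-path selection by key type, not something the diff itself guarantees:

```go
package main

// Illustration only: which runtime helper a map assignment is lowered to is a
// compiler implementation detail. The comments describe the assumed routing.
func main() {
	byID := make(map[uint32]string) // 32-bit non-pointer key: assumed mapassign_fast32 path
	byID[42] = "answer"

	x := 7
	byPtr := make(map[*int]string) // single-pointer key: assumed mapassign_fast64ptr on 64-bit, fast32ptr on 32-bit
	byPtr[&x] = "seven"

	_, _ = byID, byPtr
}
```
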
@@ -510,18 +592,94 @@ again:
 
 	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
 	// store new key at insert position
-	if t.key.kind&kindNoPointers == 0 && writeBarrier.enabled {
-		if sys.PtrSize == 8 {
-			writebarrierptr((*uintptr)(insertk), uintptr(key))
-		} else {
-			// There are three ways to squeeze at least one 32 bit pointer into 64 bits.
-			// Give up and call typedmemmove.
-			typedmemmove(t.key, insertk, unsafe.Pointer(&key))
+	*(*uint64)(insertk) = key
+
+	h.count++
+
+done:
+	val := add(unsafe.Pointer(insertb), dataOffset+bucketCnt*8+inserti*uintptr(t.valuesize))
+	if h.flags&hashWriting == 0 {
+		throw("concurrent map writes")
+	}
+	h.flags &^= hashWriting
+	return val
+}
+
+func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
+	if h == nil {
+		panic(plainError("assignment to entry in nil map"))
+	}
+	if raceenabled {
+		callerpc := getcallerpc()
+		racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
+	}
+	if h.flags&hashWriting != 0 {
+		throw("concurrent map writes")
+	}
+	hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
+
+	// Set hashWriting after calling alg.hash for consistency with mapassign.
+	h.flags |= hashWriting
+
+	if h.buckets == nil {
+		h.buckets = newobject(t.bucket) // newarray(t.bucket, 1)
+	}
+
+again:
+	bucket := hash & bucketMask(h.B)
+	if h.growing() {
+		growWork_fast64(t, h, bucket)
+	}
+	b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
+
+	var insertb *bmap
+	var inserti uintptr
+	var insertk unsafe.Pointer
+
+	for {
+		for i := uintptr(0); i < bucketCnt; i++ {
+			if b.tophash[i] == empty {
+				if insertb == nil {
+					insertb = b
+					inserti = i
+				}
+				continue
+			}
+			k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
+			if k != key {
+				continue
+			}
+			insertb = b
+			inserti = i
+			goto done
 		}
-	} else {
-		*(*uint64)(insertk) = key
+		ovf := b.overflow(t)
+		if ovf == nil {
+			break
+		}
+		b = ovf
+	}
+
+	// Did not find mapping for key. Allocate new cell & add entry.
+
+	// If we hit the max load factor or we have too many overflow buckets,
+	// and we're not already in the middle of growing, start growing.
+	if !h.growing() && (overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
+		hashGrow(t, h)
+		goto again // Growing the table invalidates everything, so try again
 	}
 
+	if insertb == nil {
+		// all current buckets are full, allocate a new one.
+		insertb = h.newoverflow(t, b)
+		inserti = 0 // not necessary, but avoids needlessly spilling inserti
+	}
+	insertb.tophash[inserti&(bucketCnt-1)] = tophash(hash) // mask inserti to avoid bounds checks
+
+	insertk = add(unsafe.Pointer(insertb), dataOffset+inserti*8)
+	// store new key at insert position
+	*(*unsafe.Pointer)(insertk) = key
+
 	h.count++
 
 done:
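
Note (not part of the diff): both new functions reuse the existing grow trigger, `overLoadFactor(h.count+1, h.B) || tooManyOverflowBuckets(h.noverflow, h.B)`. A standalone sketch of the load-factor side of that check follows, assuming the runtime's documented values of 8 entries per bucket and a target load factor of 6.5; the helper name and exact expression are illustrative, not the runtime's code:

```go
package main

import "fmt"

const bucketCnt = 8 // entries per bucket (runtime constant)

// overLoadFactorSketch approximates overLoadFactor(count, B): grow once the map
// holds more than about 6.5 * 2^B entries (and more than one bucket's worth).
func overLoadFactorSketch(count int, B uint8) bool {
	return count > bucketCnt && uintptr(count) > 13*(uintptr(1)<<B)/2
}

func main() {
	fmt.Println(overLoadFactorSketch(52, 3)) // false: exactly 6.5 * 2^3 entries
	fmt.Println(overLoadFactorSketch(53, 3)) // true: over the load factor, time to grow
}
```
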