@@ -20,6 +20,11 @@ extern "C" {
2020
// Global GC statistics (allocation counters, timing); zero-initialized.
jl_gc_num_t gc_num = {0};
2222
23+ JL_DLLEXPORT uint64_t jl_gc_total_hrtime (void )
24+ {
25+ return gc_num .total_time ;
26+ }
27+
2328// =========================================================================== //
2429// GC Callbacks
2530// =========================================================================== //
@@ -485,10 +490,210 @@ JL_DLLEXPORT void jl_finalize(jl_value_t *o)
// Number of threads participating in GC and the array of their
// thread-local states — presumably snapshotted when a collection
// begins; confirm against the collector entry point.
int gc_n_threads;
jl_ptls_t *gc_all_tls_states;
487492
493+ // =========================================================================== //
494+ // Allocation
495+ // =========================================================================== //
496+
497+ JL_DLLEXPORT void * jl_gc_alloc_typed (jl_ptls_t ptls , size_t sz , void * ty )
498+ {
499+ return jl_gc_alloc (ptls , sz , ty );
500+ }
501+
502+ JL_DLLEXPORT jl_value_t * jl_gc_allocobj (size_t sz )
503+ {
504+ jl_ptls_t ptls = jl_current_task -> ptls ;
505+ return jl_gc_alloc (ptls , sz , NULL );
506+ }
507+
508+ // allocation wrappers that save the size of allocations, to allow using
509+ // jl_gc_counted_* functions with a libc-compatible API.
510+
511+ JL_DLLEXPORT void * jl_malloc (size_t sz )
512+ {
513+ int64_t * p = (int64_t * )jl_gc_counted_malloc (sz + JL_SMALL_BYTE_ALIGNMENT );
514+ if (p == NULL )
515+ return NULL ;
516+ p [0 ] = sz ;
517+ return (void * )(p + 2 ); // assumes JL_SMALL_BYTE_ALIGNMENT == 16
518+ }
519+
520+ //_unchecked_calloc does not check for potential overflow of nm*sz
521+ STATIC_INLINE void * _unchecked_calloc (size_t nm , size_t sz ) {
522+ size_t nmsz = nm * sz ;
523+ int64_t * p = (int64_t * )jl_gc_counted_calloc (nmsz + JL_SMALL_BYTE_ALIGNMENT , 1 );
524+ if (p == NULL )
525+ return NULL ;
526+ p [0 ] = nmsz ;
527+ return (void * )(p + 2 ); // assumes JL_SMALL_BYTE_ALIGNMENT == 16
528+ }
529+
530+ JL_DLLEXPORT void * jl_calloc (size_t nm , size_t sz )
531+ {
532+ if (nm > SSIZE_MAX /sz - JL_SMALL_BYTE_ALIGNMENT )
533+ return NULL ;
534+ return _unchecked_calloc (nm , sz );
535+ }
536+
537+ JL_DLLEXPORT void jl_free (void * p )
538+ {
539+ if (p != NULL ) {
540+ int64_t * pp = (int64_t * )p - 2 ;
541+ size_t sz = pp [0 ];
542+ jl_gc_counted_free_with_size (pp , sz + JL_SMALL_BYTE_ALIGNMENT );
543+ }
544+ }
545+
546+ JL_DLLEXPORT void * jl_realloc (void * p , size_t sz )
547+ {
548+ int64_t * pp ;
549+ size_t szold ;
550+ if (p == NULL ) {
551+ pp = NULL ;
552+ szold = 0 ;
553+ }
554+ else {
555+ pp = (int64_t * )p - 2 ;
556+ szold = pp [0 ] + JL_SMALL_BYTE_ALIGNMENT ;
557+ }
558+ int64_t * pnew = (int64_t * )jl_gc_counted_realloc_with_old_size (pp , szold , sz + JL_SMALL_BYTE_ALIGNMENT );
559+ if (pnew == NULL )
560+ return NULL ;
561+ pnew [0 ] = sz ;
562+ return (void * )(pnew + 2 ); // assumes JL_SMALL_BYTE_ALIGNMENT == 16
563+ }
564+
// allocator entry points

// Out-of-line definition of jl_gc_alloc. The parentheses around the
// name suppress expansion of a function-like `jl_gc_alloc` macro, so a
// real exported symbol is emitted that forwards to jl_gc_alloc_.
JL_DLLEXPORT jl_value_t *(jl_gc_alloc)(jl_ptls_t ptls, size_t sz, void *ty)
{
    return jl_gc_alloc_(ptls, sz, ty);
}
571+
572+ // =========================================================================== //
573+ // Generic Memory
574+ // =========================================================================== //
575+
576+ size_t jl_genericmemory_nbytes (jl_genericmemory_t * m ) JL_NOTSAFEPOINT
577+ {
578+ const jl_datatype_layout_t * layout = ((jl_datatype_t * )jl_typetagof (m ))-> layout ;
579+ size_t sz = layout -> size * m -> length ;
580+ if (layout -> flags .arrayelem_isunion )
581+ // account for isbits Union array selector bytes
582+ sz += m -> length ;
583+ return sz ;
584+ }
585+
586+ // tracking Memorys with malloc'd storage
587+ void jl_gc_track_malloced_genericmemory (jl_ptls_t ptls , jl_genericmemory_t * m , int isaligned ){
588+ // This is **NOT** a GC safe point.
589+ mallocmemory_t * ma ;
590+ if (ptls -> gc_tls_common .heap .mafreelist == NULL ) {
591+ ma = (mallocmemory_t * )malloc_s (sizeof (mallocmemory_t ));
592+ }
593+ else {
594+ ma = ptls -> gc_tls_common .heap .mafreelist ;
595+ ptls -> gc_tls_common .heap .mafreelist = ma -> next ;
596+ }
597+ ma -> a = (jl_genericmemory_t * )((uintptr_t )m | !!isaligned );
598+ ma -> next = ptls -> gc_tls_common .heap .mallocarrays ;
599+ ptls -> gc_tls_common .heap .mallocarrays = ma ;
600+ }
601+
602+ // =========================================================================== //
603+ // GC Debug
604+ // =========================================================================== //
605+
606+ int gc_slot_to_fieldidx (void * obj , void * slot , jl_datatype_t * vt ) JL_NOTSAFEPOINT
607+ {
608+ int nf = (int )jl_datatype_nfields (vt );
609+ for (int i = 1 ; i < nf ; i ++ ) {
610+ if (slot < (void * )((char * )obj + jl_field_offset (vt , i )))
611+ return i - 1 ;
612+ }
613+ return nf - 1 ;
614+ }
615+
// Map an interior pointer `_slot` into `obj` to an element index, for the
// two kinds handled here: a module's usings list and a simplevector's
// data. Returns -1 when `obj` is any other type or the slot lies outside
// the element buffer.
int gc_slot_to_arrayidx(void *obj, void *_slot) JL_NOTSAFEPOINT
{
    char *slot = (char *)_slot;
    jl_datatype_t *vt = (jl_datatype_t *)jl_typeof(obj);
    char *start = NULL;
    size_t len = 0;
    size_t elsize = sizeof(void *);
    if (vt == jl_module_type) {
        jl_module_t *m = (jl_module_t *)obj;
        start = (char *)m->usings.items;
        len = module_usings_length(m);
        elsize = sizeof(struct _jl_module_using);
    }
    else if (vt == jl_simplevector_type) {
        start = (char *)jl_svec_data(obj);
        len = jl_svec_len(obj);
    }
    // For unhandled types start stays NULL and len 0, so the range check
    // below rejects every slot and we return -1.
    if (slot < start || slot >= start + elsize * len)
        return -1;
    return (slot - start) / elsize;
}
637+
638+ // =========================================================================== //
639+ // GC Control
640+ // =========================================================================== //
641+
642+ JL_DLLEXPORT uint32_t jl_get_gc_disable_counter (void ) {
643+ return jl_atomic_load_acquire (& jl_gc_disable_counter );
644+ }
645+
646+ JL_DLLEXPORT int jl_gc_is_enabled (void )
647+ {
648+ jl_ptls_t ptls = jl_current_task -> ptls ;
649+ return !ptls -> disable_gc ;
650+ }
651+
// Flag controlling GC logging; off by default.
// NOTE(review): plain non-atomic int read/written across exported
// setters — presumably relies on benign races; confirm callers.
int gc_logging_enabled = 0;

// Turn GC logging on (nonzero) or off (zero).
JL_DLLEXPORT void jl_enable_gc_logging(int enable) {
    gc_logging_enabled = enable;
}

// Query the current GC logging flag.
JL_DLLEXPORT int jl_is_gc_logging_enabled(void) {
    return gc_logging_enabled;
}
661+
662+
// collector entry point and control
// Starts at 1: GC remains globally disabled until explicitly enabled.
_Atomic(uint32_t) jl_gc_disable_counter = 1;

// Enable (on != 0) or disable (on == 0) GC for the current thread,
// keeping the global disable counter in step with the per-thread flag.
// Returns the previous per-thread enabled state (1 = was enabled).
JL_DLLEXPORT int jl_gc_enable(int on)
{
    jl_ptls_t ptls = jl_current_task->ptls;
    int prev = !ptls->disable_gc;
    ptls->disable_gc = (on == 0);
    if (on && !prev) {
        // disable -> enable: when this drops the global counter to zero,
        // fold allocations deferred while disabled back into the live
        // allocation counter so the collection heuristics see them
        if (jl_atomic_fetch_add(&jl_gc_disable_counter, -1) == 1) {
            gc_num.allocd += gc_num.deferred_alloc;
            gc_num.deferred_alloc = 0;
        }
    }
    else if (prev && !on) {
        // enable -> disable
        jl_atomic_fetch_add(&jl_gc_disable_counter, 1);
        // check if the GC is running and wait for it to finish
        jl_gc_safepoint_(ptls);
    }
    return prev;
}
686+
488687// =========================================================================== //
489688// MISC
490689// =========================================================================== //
491690
691+ JL_DLLEXPORT jl_weakref_t * jl_gc_new_weakref (jl_value_t * value )
692+ {
693+ jl_ptls_t ptls = jl_current_task -> ptls ;
694+ return jl_gc_new_weakref_th (ptls , value );
695+ }
696+
// Sentinel tag marking GC-internal buffer allocations — presumably sized
// at three words to span the maximum header read; confirm against users.
const uint64_t _jl_buff_tag[3] = {0x4eadc0004eadc000ull, 0x4eadc0004eadc000ull, 0x4eadc0004eadc000ull}; // aka 0xHEADER00
493698JL_DLLEXPORT uintptr_t jl_get_buff_tag (void ) JL_NOTSAFEPOINT
494699{
0 commit comments